}
TEST(nntrainer_TensorDim, ctor_initializer_nhwc_n) {
-<<<<<<< HEAD
- EXPECT_THROW(nntrainer::TensorDim t({1, 2, 3, 4, 5}, NHWC_),
-=======
EXPECT_THROW(nntrainer::TensorDim t({1, 2, 3, 4, 5}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::invalid_argument);
}
int status = ML_ERROR_NONE;
nntrainer::TensorDim tensor_dim;
-<<<<<<< HEAD
- status = tensor_dim.setTensorDim("1:2:3:4", NHWC_);
-=======
- status = tensor_dim.setTensorDim("1:2:3:4", NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ status = tensor_dim.setTensorDim("1:2:3:4", {NHWC_, FP32_});
EXPECT_EQ(status, ML_ERROR_NONE);
}
int status = ML_ERROR_NONE;
nntrainer::TensorDim tensor_dim;
-<<<<<<< HEAD
- status = tensor_dim.setTensorDim("1:2:3:4:5", NHWC_);
-=======
- status = tensor_dim.setTensorDim("1:2:3:4:5", NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ status = tensor_dim.setTensorDim("1:2:3:4:5", {NHWC_, FP32_});
EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
}
TEST(nntrainer_TensorDim, setTensorDim_03_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::TensorDim d(NHWC_);
-=======
nntrainer::TensorDim d(NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(d.setTensorDim(0, 0), std::invalid_argument);
EXPECT_THROW(d.setTensorDim(1, 0), std::invalid_argument);
}
TEST(nntrainer_TensorDim, setTensorDim_04_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::TensorDim d(NHWC_);
-=======
nntrainer::TensorDim d(NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
d.setTensorDim(0, 4);
d.setTensorDim(1, 5);
TEST(nntrainer_Tensor, Tensor_01_nhwc_p) {
int status = ML_ERROR_NONE;
-<<<<<<< HEAD
- nntrainer::Tensor tensor = nntrainer::Tensor(1, 2, 3, NHWC_);
-=======
nntrainer::Tensor tensor = nntrainer::Tensor(1, 2, 3, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
tensor.setZero();
ASSERT_NE(nullptr, tensor.getData());
if (tensor.getValue(0, 0, 0, 0) != 0.0)
in.push_back(ttv);
}
-<<<<<<< HEAD
- nntrainer::Tensor tensor = nntrainer::Tensor(in, NHWC_);
-=======
- nntrainer::Tensor tensor = nntrainer::Tensor(in, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor tensor = nntrainer::Tensor(in, {NHWC_, FP32_});
ASSERT_NE(nullptr, tensor.getData());
if (tensor.getValue(0, 1, 0, 0) != 1.0)
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (channel * height * width) +
j * (width * channel) + k * channel + l);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT(input, i * (channel * height * width) + j * (width * channel) +
- k * channel + l);
-
- nntrainer::Tensor target2(batch, channel, height - 2, width - 1, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT(input, i * (channel * height * width) + j * (width * channel) +
k * channel + l);
nntrainer::Tensor target2(batch, channel, height - 2, width - 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
status = input.multiply_i(target2);
EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
TEST(nntrainer_Tensor, multiply_i_broadcast_01_nhwc_p) {
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor m = ranged(1, 2, 4, 5, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 2, 4, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 5, 2, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 5, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 0, 0, 0, 4, 5, 6, 7, 16, 18, 20, 22,
36, 39, 42, 45, 64, 68, 72, 76, 100, 105, 110, 115,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 4, 5, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 4, 5, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 4, 9, 0, 5, 12, 21, 32, 45, 60, 77,
48, 65, 84, 105, 128, 153, 180, 209, 160, 189, 220, 253,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 1, 2, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 1, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 0, 0, 0, 4, 5, 6, 7, 0, 0, 0, 0, 12, 13,
14, 15, 0, 0, 0, 0, 20, 21, 22, 23, 0, 0, 0, 0,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(1, 4, 1, 2, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 4, 1, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 4, 9, 16, 25, 36, 49, 0, 9, 20, 33, 48, 65,
84, 105, 0, 17, 36, 57, 80, 105, 132, 161, 0, 25, 52, 81,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 5, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 5, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 0, 0, 0, 0, 0, 0, 0, 8, 9, 10, 11,
12, 13, 14, 15, 32, 34, 36, 38, 40, 42, 44, 46,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(1, 1, 1, 2, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 1, 1, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 0, 0, 0, 4, 5, 6, 7, 0, 0, 0, 0, 12, 13, 14,
15, 0, 0, 0, 0, 20, 21, 22, 23, 0, 0, 0, 0, 28, 29,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(1, 4, 1, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 4, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 4, 9, 0, 5, 12, 21, 0, 9, 20, 33, 0, 13, 28, 45,
0, 17, 36, 57, 0, 21, 44, 69, 0, 25, 52, 81, 0, 29, 60, 93,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 1, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 1, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 1, 4, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 1, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 1, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 1, 4, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 1, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {0, 0, 0, 0, 0, 5, 6, 7, 8, 9,
20, 22, 24, 26, 28, 45, 48, 51, 54, 57,
80, 84, 88, 92, 96, 125, 130, 135, 140, 145,
}
TEST(nntrainer_Tensor, multiply_i_broadcast_not_supported_01_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 1, 3, 1, NHWC_);
- nntrainer::Tensor target2(3, 1, 3, 3, NHWC_);
-=======
nntrainer::Tensor target(3, 1, 3, 1, NHWC_, FP32_);
nntrainer::Tensor target2(3, 1, 3, 3, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(target.multiply_i(target2), ML_ERROR_INVALID_PARAMETER);
}
TEST(nntrainer_Tensor, multiply_i_broadcast_not_broadcastable_02_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor target2(3, 2, 3, 1, NHWC_);
-=======
nntrainer::Tensor target(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor target2(3, 2, 3, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(target.multiply_i(target2), ML_ERROR_INVALID_PARAMETER);
}
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT(input,
i * (batch * height * width) + j * (height * width) + k + 1);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT(input,
- i * (height * width) + j * (height * width) + k * width + l);
-
- nntrainer::Tensor test(batch - 1, height - 1, width - 1, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT(input,
i * (height * width) + j * (height * width) + k * width + l);
nntrainer::Tensor test(batch - 1, height - 1, width - 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW({ input.multiply(test); }, std::invalid_argument);
}
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-
- nntrainer::Tensor input(batch, 2 * channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
nntrainer::Tensor input(batch, 2 * channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor shared_input = input.getSharedDataTensor(dim, 0, false);
nntrainer::Tensor test(dim);
// shared_input is not continuous
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-
- nntrainer::Tensor input(dim);
- nntrainer::Tensor test(batch, 2 * channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
nntrainer::Tensor input(dim);
nntrainer::Tensor test(batch, 2 * channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor shared_test = test.getSharedDataTensor(dim, 0, false);
EXPECT_THROW(input.multiply(shared_test), std::invalid_argument);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
// input is not allocated now : alloc_now == false
nntrainer::Tensor input(dim, false);
nntrainer::Tensor test(dim);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT(input, i * (height * width * channel) + j * (width * channel) +
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT(input, i * (height * width * channel) + j * (width * channel) +
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
- j * (width * channel) + k * channel + 1);
-
- nntrainer::Tensor expected(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor expected(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(expected, (i * (height * width * channel) +
j * (width * channel) + k * channel + 1) *
2);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l + 1);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l + 1);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
- j * (width * channel) + k * channel + l + 1);
-
- nntrainer::Tensor original(batch, channel, height - 2, width - 1, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l + 1);
nntrainer::Tensor original(batch, channel, height - 2, width - 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
status = input.divide_i(original);
EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l + 1);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l + 1);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
- j * (width * channel) + k * channel + l + 1);
-
- nntrainer::Tensor test(batch - 1, channel - 1, height - 1, width - 1, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l + 1);
nntrainer::Tensor test(batch - 1, channel - 1, height - 1, width - 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW({ input.divide(test); }, std::invalid_argument);
}
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-
- nntrainer::Tensor input(batch, 2 * channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
nntrainer::Tensor input(batch, 2 * channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor shared_input = input.getSharedDataTensor(dim, 0, false);
nntrainer::Tensor test(dim);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-
- nntrainer::Tensor input(dim);
- nntrainer::Tensor test(batch, 2 * channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
nntrainer::Tensor input(dim);
nntrainer::Tensor test(batch, 2 * channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor shared_test = test.getSharedDataTensor(dim, 0, false);
EXPECT_THROW(input.divide(shared_test), std::invalid_argument);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim, false);
nntrainer::Tensor test(dim);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
TEST(nntrainer_Tensor, divide_i_broadcast_01_nhwc_p) {
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_);
- t.add_i(1);
- nntrainer::Tensor m = ranged(1, 2, 4, 5, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_, FP32_);
t.add_i(1);
nntrainer::Tensor m = ranged(1, 2, 4, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1);
float answer_data[] = {
1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_);
- t.add_i(1);
- nntrainer::Tensor m = ranged(3, 1, 4, 5, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_, FP32_);
t.add_i(1);
nntrainer::Tensor m = ranged(3, 1, 4, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1);
float answer_data[] = {
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_);
- t.add_i(1);
- nntrainer::Tensor m = ranged(3, 2, 1, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_, FP32_);
t.add_i(1);
nntrainer::Tensor m = ranged(3, 2, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1);
float answer_data[] = {
1, 1, 3, 2, 5, 3,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_);
- t.add_i(1);
- nntrainer::Tensor m = ranged(1, 2, 4, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_, FP32_);
t.add_i(1);
nntrainer::Tensor m = ranged(1, 2, 4, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1);
float answer_data[] = {
1, 1, 3, 2, 5, 3,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_);
- t.add_i(1);
- nntrainer::Tensor m = ranged(3, 1, 1, 5, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_, FP32_);
t.add_i(1);
nntrainer::Tensor m = ranged(3, 1, 1, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1);
float answer_data[] = {
1, 2, 1.5, 2, 1.6666666, 2,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_);
- t.add_i(1);
- nntrainer::Tensor m = ranged(1, 2, 1, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_, FP32_);
t.add_i(1);
nntrainer::Tensor m = ranged(1, 2, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1);
float answer_data[] = {
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_);
- t.add_i(1);
- nntrainer::Tensor m = ranged(1, 1, 4, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_, FP32_);
t.add_i(1);
nntrainer::Tensor m = ranged(1, 1, 4, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1);
float answer_data[] = {
1, 2, 3, 4, 5, 6,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_);
- t.add_i(1);
- nntrainer::Tensor m = ranged(3, 1, 1, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 4, 5, NHWC_, FP32_);
t.add_i(1);
nntrainer::Tensor m = ranged(3, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1);
float answer_data[] = {
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 2, 5, 1, NHWC_);
- nntrainer::Tensor t = ranged(3, 2, 5, 1, NHWC_);
- t.add_i(1);
- nntrainer::Tensor m = ranged(3, 2, 1, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 2, 5, 1, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 2, 5, 1, NHWC_, FP32_);
t.add_i(1);
nntrainer::Tensor m = ranged(3, 2, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1);
float answer_data[] = {
1, 1, 3, 2, 5, 3, 7, 4, 9, 5,
}
TEST(nntrainer_Tensor, divide_i_broadcast_not_supported_01_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 1, 3, 1, NHWC_);
- nntrainer::Tensor target2(3, 1, 3, 3, NHWC_);
-=======
nntrainer::Tensor target(3, 1, 3, 1, NHWC_, FP32_);
nntrainer::Tensor target2(3, 1, 3, 3, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(target.divide_i(target2), ML_ERROR_INVALID_PARAMETER);
}
TEST(nntrainer_Tensor, divide_i_broadcast_not_broadcastable_02_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor target2(3, 2, 3, 1, NHWC_);
-=======
nntrainer::Tensor target(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor target2(3, 2, 3, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(target.divide_i(target2), ML_ERROR_INVALID_PARAMETER);
}
int width = 10;
int channel = 1;
-<<<<<<< HEAD
- nntrainer::Tensor target(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1);
-
- nntrainer::Tensor target2(batch, height - 2, width - 3, NHWC_);
-=======
nntrainer::Tensor target(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1);
nntrainer::Tensor target2(batch, height - 2, width - 3, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
status = target.add_i(target2);
EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
int width = 10;
int channel = 3;
-<<<<<<< HEAD
- nntrainer::Tensor target(batch, height, width, channel, NHWC_);
-=======
nntrainer::Tensor target(batch, height, width, channel, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(target, i * (height * width * channel) +
j * (width * channel) + k * channel + 1 + l);
int width = 10;
int channel = 2;
-<<<<<<< HEAD
- nntrainer::Tensor target(batch, height, width, channel, NHWC_);
- GEN_TEST_INPUT_NHWC(target, i * (height * width * channel) +
- j * (width * channel) + k * channel + 1);
-
- nntrainer::Tensor original(height, width, channel, NHWC_);
-=======
nntrainer::Tensor target(batch, height, width, channel, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(target, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor original(height, width, channel, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
original.copy(target);
status = target.add_i(target, 3.0);
}
TEST(nntrainer_Tensor, add_i_broadcast_01_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_);
- {
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(1, 4, 5, 2, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 4, 5, 2, NHWC_, FP32_);
{
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 4, 5, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26,
28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 5, 2, NHWC_);
-=======
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 5, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 15, 16,
17, 18, 20, 21, 22, 23, 25, 26, 27, 28, 30, 31, 32, 33,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 4, 5, 1, NHWC_);
-=======
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 4, 5, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 2, 4, 6, 4, 6, 8, 10, 12, 14, 16, 18, 16, 18,
20, 22, 24, 26, 28, 30, 28, 30, 32, 34, 36, 38, 40, 42,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 1, 2, NHWC_);
-=======
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 1, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 5, 6, 7, 8, 8, 9, 10, 11, 13, 14,
15, 16, 16, 17, 18, 19, 21, 22, 23, 24, 24, 25, 26, 27,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(1, 4, 1, 2, NHWC_);
-=======
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 4, 1, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 2, 4, 6, 8, 10, 12, 14, 8, 10, 12, 14, 16, 18,
20, 22, 16, 18, 20, 22, 24, 26, 28, 30, 24, 26, 28, 30,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 5, 1, NHWC_);
-=======
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 5, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14,
15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(1, 1, 1, 2, NHWC_);
-=======
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 1, 1, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 5, 6, 7, 8, 8, 9, 10, 11, 13, 14,
15, 16, 16, 17, 18, 19, 21, 22, 23, 24, 24, 25, 26, 27,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(1, 4, 1, 1, NHWC_);
-=======
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 4, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14,
16, 18, 16, 18, 20, 22, 20, 22, 24, 26, 24, 26, 28, 30,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_);
- nntrainer::Tensor m = ranged(1, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor t = ranged(3, 4, 5, 2, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
m.add_i(1.0);
float answer_data[] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 1, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 1, 4, NHWC_);
- nntrainer::Tensor m = ranged(3, 1, 1, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 1, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 1, 4, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 1, 1, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13,
14, 15, 16, 18, 19, 20, 21, 22, 24, 25, 26, 27,
28, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 42,
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(1, 1, 2, 1, NHWC_);
- nntrainer::Tensor t = ranged(1, 1, 2, 1, NHWC_);
- nntrainer::Tensor m = ranged(1, 1, 2, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(1, 1, 2, 1, NHWC_, FP32_);
nntrainer::Tensor t = ranged(1, 1, 2, 1, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 1, 2, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {0.0, 2.0};
nntrainer::Tensor answer(ref_dim, answer_data);
int status = t.add_i(m);
EXPECT_EQ(t, answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(16, 1, 1, 1, NHWC_);
- nntrainer::Tensor t = ranged(16, 1, 1, 1, NHWC_);
- nntrainer::Tensor m = ranged(1, 1, 1, 1, NHWC_);
-=======
nntrainer::TensorDim ref_dim(16, 1, 1, 1, NHWC_, FP32_);
nntrainer::Tensor t = ranged(16, 1, 1, 1, NHWC_, FP32_);
nntrainer::Tensor m = ranged(1, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0};
nntrainer::Tensor answer(ref_dim, answer_data);
}
TEST(nntrainer_Tensor, add_i_broadcast_not_supported_01_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 1, 3, 1, NHWC_);
- nntrainer::Tensor target2(3, 1, 3, 3, NHWC_);
-=======
nntrainer::Tensor target(3, 1, 3, 1, NHWC_, FP32_);
nntrainer::Tensor target2(3, 1, 3, 3, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(target.add_i(target2), ML_ERROR_INVALID_PARAMETER);
}
TEST(nntrainer_Tensor, add_i_broadcast_not_broadcastable_02_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor target2(3, 2, 3, 1, NHWC_);
-=======
nntrainer::Tensor target(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor target2(3, 2, 3, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(target.add_i(target2), ML_ERROR_INVALID_PARAMETER);
}
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor result = input.add(input);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
- j * (width * channel) + k * channel + 1);
-
- nntrainer::Tensor test(batch - 1, height - 1, width - 1, channel, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor test(batch - 1, height - 1, width - 1, channel, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW({ input.add(test); }, std::invalid_argument);
}
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-
- nntrainer::Tensor input(batch, channel * 2, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
nntrainer::Tensor input(batch, channel * 2, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor shared_input = input.getSharedDataTensor(dim, 0, false);
nntrainer::Tensor test(dim);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, height, width, channel, NHWC_);
-
- nntrainer::Tensor input(dim);
- nntrainer::Tensor test(batch, height, width, channel * 2, NHWC_);
-=======
nntrainer::TensorDim dim(batch, height, width, channel, NHWC_, FP32_);
nntrainer::Tensor input(dim);
nntrainer::Tensor test(batch, height, width, channel * 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor shared_test = test.getSharedDataTensor(dim, 0, false);
EXPECT_THROW(input.add(shared_test), std::invalid_argument);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, height, width, channel, NHWC_);
-=======
nntrainer::TensorDim dim(batch, height, width, channel, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim, false);
nntrainer::Tensor test(dim);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, height, width, channel, NHWC_);
-=======
nntrainer::TensorDim dim(batch, height, width, channel, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
TEST(nntrainer_Tensor, pow_01_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor input = constant(4.0, 3, 2, 4, 5, NHWC_);
-=======
nntrainer::Tensor input = constant(4.0, 3, 2, 4, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor actual, expected;
actual = input.pow(0.5f);
-<<<<<<< HEAD
- expected = constant(2.0, 3, 2, 4, 5, NHWC_);
- EXPECT_EQ(actual, expected);
-
- actual = input.pow(2.0f);
- expected = constant(16.0, 3, 2, 4, 5, NHWC_);
- EXPECT_EQ(actual, expected);
-
- actual = input.pow(-0.5f);
- expected = constant(0.5, 3, 2, 4, 5, NHWC_);
-=======
expected = constant(2.0, 3, 2, 4, 5, NHWC_, FP32_);
EXPECT_EQ(actual, expected);
actual = input.pow(-0.5f);
expected = constant(0.5, 3, 2, 4, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(actual, expected);
}
int width = 10;
int channel = 3;
-<<<<<<< HEAD
- nntrainer::Tensor target(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor target(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(target, i * (height * width * channel) +
j * (width * channel) + k * channel + l);
int width = 10;
int channel = 3;
-<<<<<<< HEAD
- nntrainer::Tensor target(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor target(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(target, i * (height * width * channel) +
j * (width * channel) + k * channel + l);
int width = 10;
int channel = 3;
-<<<<<<< HEAD
- nntrainer::Tensor target(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(target, i * (height * width * channel) +
- j * (width * channel) + k * channel + l);
-
- nntrainer::Tensor target2(batch, height, width - 3, channel - 1, NHWC_);
-=======
nntrainer::Tensor target(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(target, i * (height * width * channel) +
j * (width * channel) + k * channel + l);
nntrainer::Tensor target2(batch, height, width - 3, channel - 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
status = target.subtract_i(target2);
EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor result = input.subtract(input);
-<<<<<<< HEAD
- EXPECT_EQ(constant(0.0, batch, channel, height, width, NHWC_), result);
-=======
EXPECT_EQ(constant(0.0, batch, channel, height, width, NHWC_, FP32_), result);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
}
TEST(nntrainer_Tensor, subtract_03_nhwc_n) {
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
- j * (width * channel) + k * channel + 1);
-
- nntrainer::Tensor test(batch - 1, channel - 1, height, width - 1, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor test(batch - 1, channel - 1, height, width - 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW({ input.subtract(test); }, std::invalid_argument);
}
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-
- nntrainer::Tensor input(batch, 2 * channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
nntrainer::Tensor input(batch, 2 * channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor shared_input = input.getSharedDataTensor(dim, 0, false);
nntrainer::Tensor test(dim);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-
- nntrainer::Tensor input(dim);
- nntrainer::Tensor test(batch, 2 * channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
nntrainer::Tensor input(dim);
nntrainer::Tensor test(batch, 2 * channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor shared_test = test.getSharedDataTensor(dim, 0, false);
EXPECT_THROW(input.subtract(shared_test), std::invalid_argument);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim, false);
nntrainer::Tensor test(dim);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
- j * (width * channel) + k * channel + 1);
-
- nntrainer::Tensor expected(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor expected(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(expected, i * (height * width * channel) +
j * (width * channel) + k * channel);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + l);
{225, 228},
{231, 234},
{237, 240}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans1(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{231},
{235},
{239}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans2(
std::vector<std::vector<std::vector<std::vector<float>>>>({{{{22, 24},
{210, 212},
{214, 216},
{218, 220}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans3(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{{{{100, 110}}, {{300, 310}}},
{{{500, 510}}, {{700, 710}}},
{{{900, 910}}, {{1100, 1110}}}}),
-<<<<<<< HEAD
- NHWC_);
-
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
- NHWC_, FP32_);
+ {NHWC_, FP32_});
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * (channel) + l + 1);
const int height = 2;
const int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * channel * width) +
j * (width * channel) + k * (channel) + l + 1);
// Test for alpha == 1 and beta == 0 and dimension of reduced axis == 1
{225, 228},
{231, 234},
{237, 240}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_1_1_0(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{231},
{235},
{239}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_2_1_0(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{210, 212},
{214, 216},
{218, 220}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_3_1_0(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{{{{100, 110}}, {{300, 310}}},
{{{500, 510}}, {{700, 710}}},
{{{900, 910}}, {{1100, 1110}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor result_0_1_0 = input.sum(0, 1);
nntrainer::Tensor result_1_1_0 = input.sum(1, 1);
{295, 300},
{305, 310},
{315, 320}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_1_1_2(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{347},
{353},
{359}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_2_1_2(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{320, 324},
{328, 332},
{336, 340}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_3_1_2(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{{{{102, 114}}, {{306, 318}}},
{{{510, 522}}, {{714, 726}}},
{{{918, 930}}, {{1122, 1134}}}}),
-<<<<<<< HEAD
- NHWC_);
-
- nntrainer::Tensor output_0_1_2(1, channel, height, width, NHWC_);
-=======
- NHWC_, FP32_);
+ {NHWC_, FP32_});
nntrainer::Tensor output_0_1_2(1, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
{
const int batch = 1;
GEN_TEST_INPUT_NHWC(output_0_1_2, i * (channel * height * width) +
j * (width * channel) +
k * (channel) + l + 1);
}
-<<<<<<< HEAD
- nntrainer::Tensor output_1_1_2(batch, 1, height, width, NHWC_);
-=======
nntrainer::Tensor output_1_1_2(batch, 1, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
{
const int channel = 1;
GEN_TEST_INPUT_NHWC(output_1_1_2, i * (channel * height * width) +
j * (width * channel) +
k * (channel) + l + 1);
}
-<<<<<<< HEAD
- nntrainer::Tensor output_2_1_2(batch, channel, 1, width, NHWC_);
-=======
nntrainer::Tensor output_2_1_2(batch, channel, 1, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
{
const int height = 1;
GEN_TEST_INPUT_NHWC(output_2_1_2, i * (channel * height * width) +
j * (width * channel) +
k * (channel) + l + 1);
}
-<<<<<<< HEAD
- nntrainer::Tensor output_3_1_2(batch, channel, height, 1, NHWC_);
-=======
nntrainer::Tensor output_3_1_2(batch, channel, height, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
{
const int width = 1;
GEN_TEST_INPUT_NHWC(output_3_1_2, i * (channel * height * width) +
{450, 456},
{462, 468},
{474, 480}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_1_2_0(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{462},
{470},
{478}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_2_2_0(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{420, 424},
{428, 432},
{436, 440}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_3_2_0(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{{{{200, 220}}, {{600, 620}}},
{{{1000, 1020}}, {{1400, 1420}}},
{{{1800, 1820}}, {{2200, 2220}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor result_0_2_0 = input.sum(0, 2);
nntrainer::Tensor result_1_2_0 = input.sum(1, 2);
{520, 528},
{536, 544},
{552, 560}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_1_2_2(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{578},
{588},
{598}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_2_2_2(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{530, 536},
{542, 548},
{554, 560}}}}),
-<<<<<<< HEAD
- NHWC_);
-=======
- NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ {NHWC_, FP32_});
nntrainer::Tensor ans_3_2_2(
std::vector<std::vector<std::vector<std::vector<float>>>>(
{{{{202, 224}}, {{606, 628}}},
{{{1010, 1032}}, {{1414, 1436}}},
{{{1818, 1840}}, {{2222, 2244}}}}),
-<<<<<<< HEAD
- NHWC_);
-
- nntrainer::Tensor output_0_2_2(1, channel, height, width, NHWC_);
-=======
- NHWC_, FP32_);
+ {NHWC_, FP32_});
nntrainer::Tensor output_0_2_2(1, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
{
const int batch = 1;
GEN_TEST_INPUT_NHWC(output_0_2_2, i * (channel * height * width) +
j * (channel * width) +
k * (channel) + l + 1);
}
-<<<<<<< HEAD
- nntrainer::Tensor output_1_2_2(batch, 1, height, width, NHWC_);
-=======
nntrainer::Tensor output_1_2_2(batch, 1, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
{
const int channel = 1;
GEN_TEST_INPUT_NHWC(output_1_2_2, i * (channel * height * width) +
j * (channel * width) +
k * (channel) + l + 1);
}
-<<<<<<< HEAD
- nntrainer::Tensor output_2_2_2(batch, channel, 1, width, NHWC_);
-=======
nntrainer::Tensor output_2_2_2(batch, channel, 1, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
{
const int height = 1;
GEN_TEST_INPUT_NHWC(output_2_2_2, i * (channel * height * width) +
j * (channel * width) +
k * (channel) + l + 1);
}
-<<<<<<< HEAD
- nntrainer::Tensor output_3_2_2(batch, channel, height, 1, NHWC_);
-=======
nntrainer::Tensor output_3_2_2(batch, channel, height, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
{
const int width = 1;
GEN_TEST_INPUT_NHWC(output_3_2_2, i * (channel * height * width) +
int height = 2;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (channel * width) + k * channel + l + 1);
}
TEST(nntrainer_Tensor, multiple_sum_invalid_args_01_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t = constant(1.0, 1, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor t = constant(1.0, 1, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
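+ // an empty axis list is not a valid reduction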
EXPECT_THROW(t.sum(std::vector<unsigned int>()), std::invalid_argument);
}
TEST(nntrainer_Tensor, multiple_sum_out_of_range_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t = constant(1.0, 1, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor t = constant(1.0, 1, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
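+ // axis 7 is out of range for a 4-D tensor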
EXPECT_THROW(t.sum({7}), std::out_of_range);
}
TEST(nntrainer_Tensor, multiple_sum_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7, NHWC_);
- nntrainer::Tensor actual, expected;
-
- actual = t.sum({0, 1});
- expected = constant(2 * 3, 1, 1, 5, 7, NHWC_);
- EXPECT_EQ(actual, expected);
-
- actual = t.sum({1, 2, 3});
- expected = constant(3 * 5 * 7, 2, 1, 1, 1, NHWC_);
- EXPECT_EQ(actual, expected);
-
- actual = t.sum({3, 1});
- expected = constant(7 * 3, 2, 1, 5, 1, NHWC_);
- EXPECT_EQ(actual, expected);
-
- actual = t.sum({3, 1}, 0.5);
- expected = constant(7 * 3 * 0.5, 2, 1, 5, 1, NHWC_);
-=======
nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7, NHWC_, FP32_);
nntrainer::Tensor actual, expected;
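+ // summing the all-ones tensor over the listed axes gives the product of the reduced extents (times the optional scale)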
+ actual = t.sum({0, 1});
+ expected = constant(2 * 3, 1, 1, 5, 7, NHWC_, FP32_);
+ EXPECT_EQ(actual, expected);
+ actual = t.sum({1, 2, 3});
+ expected = constant(3 * 5 * 7, 2, 1, 1, 1, NHWC_, FP32_);
+ EXPECT_EQ(actual, expected);
+ actual = t.sum({3, 1});
+ expected = constant(7 * 3, 2, 1, 5, 1, NHWC_, FP32_);
+ EXPECT_EQ(actual, expected);
actual = t.sum({3, 1}, 0.5);
expected = constant(7 * 3 * 0.5, 2, 1, 5, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(actual, expected);
}
TEST(nntrainer_Tensor, average_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7, NHWC_);
-=======
nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor actual, expected;
actual = t.average();
-<<<<<<< HEAD
- expected = constant(1.0, 1, 1, 1, 1, NHWC_);
-=======
expected = constant(1.0, 1, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(actual, expected);
int idx = 0;
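+ // overwrite the tensor with alternating 0/1 values so its mean is exactly 0.5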
- t = t.apply([&](float in) { return idx++ % 2; });
+ std::function<float(float)> f = [&](float in) { return idx++ % 2; };
+ t = t.apply(f);
actual = t.average();
-<<<<<<< HEAD
- expected = constant(0.5, 1, 1, 1, 1, NHWC_);
-=======
expected = constant(0.5, 1, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(actual, expected);
}
TEST(nntrainer_Tensor, average_axis_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2, NHWC_);
-=======
nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
int idx = 0;
std::function<float(float)> f = [&](float in) { return idx++ % 2; };
t = t.apply(f);
nntrainer::Tensor actual, expected;
actual = t.average(0);
-<<<<<<< HEAD
- expected = constant(0, 1, 2, 2, 2, NHWC_).apply(f);
- EXPECT_EQ(actual, expected);
-
- actual = t.average(1);
- expected = constant(0.5, 2, 1, 2, 2, NHWC_);
- EXPECT_EQ(actual, expected);
-
- actual = t.average(2);
- expected = constant(0, 2, 2, 1, 2, NHWC_).apply(f);
- EXPECT_EQ(actual, expected);
-
- actual = t.average(3);
- expected = constant(0, 2, 2, 2, 1, NHWC_).apply(f);
-=======
expected = constant(0, 1, 2, 2, 2, NHWC_, FP32_).apply(f);
EXPECT_EQ(actual, expected);
+ actual = t.average(1);
+ expected = constant(0.5, 2, 1, 2, 2, NHWC_, FP32_);
+ EXPECT_EQ(actual, expected);
+ actual = t.average(2);
+ expected = constant(0, 2, 2, 1, 2, NHWC_, FP32_).apply(f);
+ EXPECT_EQ(actual, expected);
actual = t.average(3);
expected = constant(0, 2, 2, 2, 1, NHWC_, FP32_).apply(f);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(actual, expected);
}
TEST(nntrainer_Tensor, average_axis_out_of_range_01_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2, NHWC_);
-=======
nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(t.average(-1), std::out_of_range);
}
TEST(nntrainer_Tensor, average_axis_out_of_range_02_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2, NHWC_);
-=======
nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(t.average(7), std::out_of_range);
}
TEST(nntrainer_Tensor, average_multiple_axes_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7, NHWC_);
- nntrainer::Tensor actual, expected;
-
- actual = t.average({0, 1, 2});
- expected = constant(1.0, 1, 1, 1, 7, NHWC_);
- EXPECT_EQ(actual, expected);
-
- actual = t.average({0, 1, 2, 3});
- expected = constant(1.0, 1, 1, 1, 1, NHWC_);
- EXPECT_EQ(actual, expected);
-
- actual = t.average({3, 1});
- expected = constant(1.0, 2, 1, 5, 1, NHWC_);
- EXPECT_EQ(actual, expected);
-
- actual = t.average({3, 1, 1, 1, 3});
- expected = constant(1.0, 2, 1, 5, 1, NHWC_);
-=======
nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7, NHWC_, FP32_);
nntrainer::Tensor actual, expected;
+ actual = t.average({0, 1, 2});
+ expected = constant(1.0, 1, 1, 1, 7, NHWC_, FP32_);
+ EXPECT_EQ(actual, expected);
+ actual = t.average({0, 1, 2, 3});
+ expected = constant(1.0, 1, 1, 1, 1, NHWC_, FP32_);
+ EXPECT_EQ(actual, expected);
+ actual = t.average({3, 1});
+ expected = constant(1.0, 2, 1, 5, 1, NHWC_, FP32_);
+ EXPECT_EQ(actual, expected);
actual = t.average({3, 1, 1, 1, 3});
expected = constant(1.0, 2, 1, 5, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(actual, expected);
}
TEST(nntrainer_Tensor, average_multiple_axes_01_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7, NHWC_);
-=======
nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(t.average({5, 7}), std::out_of_range);
}
TEST(nntrainer_Tensor,
constructor_from_shared_const_ptr_shares_variable_nhwc_n) {
nntrainer::sharedConstTensor A =
-<<<<<<< HEAD
- MAKE_SHARED_TENSOR(constant(1.0f, 3, 4, 5, 6, NHWC_));
-=======
MAKE_SHARED_TENSOR(constant(1.0f, 3, 4, 5, 6, NHWC_, FP32_));
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor B = *A;
nntrainer::Tensor C = A->clone();
EXPECT_EQ(*A, B);
EXPECT_NE(*A, C);
-<<<<<<< HEAD
- C.reshape(nntrainer::TensorDim(3, 4, 6, 5, NHWC_));
-=======
C.reshape(nntrainer::TensorDim(3, 4, 6, 5, NHWC_, FP32_));
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(A->getDim(), B.getDim());
EXPECT_NE(A->getDim(), C.getDim());
}
TEST(nntrainer_Tensor, dot_01_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor input(2, 4, 5, 3, NHWC_);
- nntrainer::Tensor m(1, 4, 5, 3, NHWC_);
-=======
nntrainer::Tensor input(2, 4, 5, 3, NHWC_, FP32_);
nntrainer::Tensor m(1, 4, 5, 3, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(nntrainer::Tensor result = input.dot(m), std::runtime_error);
}
TEST(nntrainer_Tensor, dot_02_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor input(2, 3, 4, 5, NHWC_);
- nntrainer::Tensor m(1, 3, 4, 5, NHWC_);
-=======
nntrainer::Tensor input(2, 3, 4, 5, NHWC_, FP32_);
nntrainer::Tensor m(1, 3, 4, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(nntrainer::Tensor result = input.dot(m, true),
std::runtime_error);
}
TEST(nntrainer_Tensor, dot_02_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor input(2, 3, 4, 5, NHWC_);
- nntrainer::Tensor m(1, 3, 4, 5, NHWC_);
-=======
nntrainer::Tensor input(2, 3, 4, 5, NHWC_, FP32_);
nntrainer::Tensor m(1, 3, 4, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_NO_THROW(nntrainer::Tensor result = input.dot(m, false, true));
}
TEST(nntrainer_Tensor, dot_03_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor input(1, 3, 4, 5, NHWC_);
- nntrainer::Tensor m(1, 3, 4, 5, NHWC_);
-=======
nntrainer::Tensor input(1, 3, 4, 5, NHWC_, FP32_);
nntrainer::Tensor m(1, 3, 4, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_NO_THROW(nntrainer::Tensor result = input.dot(m, true));
}
TEST(nntrainer_Tensor, dot_04_nhwc_n) {
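+ // these shapes only line up for dot() once the second operand is transposed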
-<<<<<<< HEAD
- nntrainer::Tensor input(2, 4, 5, 3, NHWC_);
- nntrainer::Tensor m(1, 4, 5, 1, NHWC_);
-=======
nntrainer::Tensor input(2, 4, 5, 3, NHWC_, FP32_);
nntrainer::Tensor m(1, 4, 5, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(nntrainer::Tensor result = input.dot(m), std::runtime_error);
EXPECT_NO_THROW(nntrainer::Tensor result = input.dot(m, false, true));
}
int width = 5;
float ans[2][4][5][40] = {0};
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * (channel) + l + 1);
-<<<<<<< HEAD
- nntrainer::Tensor weight(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(weight, i * (height * width * channel) +
- j * (width * channel) + k * (channel) + l + 1);
-
- weight.reshape(nntrainer::TensorDim(1, 3, 8, 5, NHWC_));
-=======
nntrainer::Tensor weight(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(weight, i * (height * width * channel) +
j * (width * channel) + k * (channel) + l + 1);
weight.reshape(nntrainer::TensorDim(1, 3, 8, 5, NHWC_, FP32_));
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor result = input.dot(weight, false, true);
float ans[3][1][1][3] = {
{{{30, 36, 42}}}, {{{66, 81, 96}}}, {{{102, 126, 150}}}};
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * (channel) + l + 1);
TEST(nntrainer_Tensor, dot_transpose_nhwc_p) {
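+ // each block below checks one (transpose a, transpose b) combination against a precomputed answer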
{
float a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
- 92, 113, 134, 155, 128, 158, 188, 218};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
92, 113, 134, 155, 128, 158, 188, 218};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
- 92, 113, 134, 155, 128, 158, 188, 218};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
92, 113, 134, 155, 128, 158, 188, 218};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), a_data);
- float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
- 92, 113, 134, 155, 128, 158, 188, 218};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
92, 113, 134, 155, 128, 158, 188, 218};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
- 92, 113, 134, 155, 128, 158, 188, 218};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
92, 113, 134, 155, 128, 158, 188, 218};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 1, 4, 2, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 1, 4, 2, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), a_data);
- float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 2, 4, 1, 3, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), b_data);
- float answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 2, 4, 1, 3, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), b_data);
- float answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), a_data);
- float b_data[] = {0, 2, 4, 1, 3, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), b_data);
- float answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 2, 4, 1, 3, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), b_data);
- float answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 1, 4, 2, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 2, 4, 1, 3, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), b_data);
- float answer_data[] = {10, 13, 28, 40};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 2, 4, 1, 3, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13, 28, 40};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 1, 4, 2, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), b_data);
- float answer_data[] = {10, 13, 28, 40};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13, 28, 40};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), a_data);
- float b_data[] = {0, 2, 4, 1, 3, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), b_data);
- float answer_data[] = {10, 13, 28, 40};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 2, 4, 1, 3, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13, 28, 40};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), b_data);
- float answer_data[] = {10, 13, 28, 40};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13, 28, 40};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
TEST(nntrainer_Tensor, dot_shortcuts_nhwc_p) {
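+ // vector-vector and vector-matrix shortcut paths, exercised for every transpose combination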
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), b_data);
- float answer_data[] = {5};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), b_data);
- float answer_data[] = {5};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), b_data);
- float answer_data[] = {5};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), b_data);
- float answer_data[] = {5};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), b_data);
- float answer_data[] = {5, 14};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5, 14};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 1, 4, 2, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), b_data);
- float answer_data[] = {5, 14};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5, 14};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), b_data);
- float answer_data[] = {5, 14};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5, 14};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 1, 4, 2, 5};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), b_data);
- float answer_data[] = {5, 14};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5, 14};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), b_data);
- float answer_data[] = {5, 14, 23, 32};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5, 14, 23, 32};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), b_data);
- float answer_data[] = {5, 14, 23, 32};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5, 14, 23, 32};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), b_data);
- float answer_data[] = {5, 14, 23, 32};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5, 14, 23, 32};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), b_data);
- float answer_data[] = {5, 14, 23, 32};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), b_data);
float answer_data[] = {5, 14, 23, 32};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), a_data);
- float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 4, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), a_data);
- float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_), b_data);
- float answer_data[] = {20, 23, 26, 29};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 4, 1, NHWC_, FP32_), b_data);
float answer_data[] = {20, 23, 26, 29};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 4, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), b_data);
- float answer_data[] = {10, 13};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 1, 2, 3, 4, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_), b_data);
- float answer_data[] = {10, 13};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 1, 2, 3, 4, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 2, 3, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, false);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_), a_data);
- float b_data[] = {0, 2, 4, 1, 3, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), b_data);
- float answer_data[] = {10, 13};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 3, 1, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 2, 4, 1, 3, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, false, true);
EXPECT_EQ(ret, answer);
}
{
float a_data[] = {0, 1, 2};
-<<<<<<< HEAD
- nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_), a_data);
- float b_data[] = {0, 2, 4, 1, 3, 5};
- nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_), b_data);
- float answer_data[] = {10, 13};
- nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 1, 1, NHWC_),
-=======
nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1, NHWC_, FP32_), a_data);
float b_data[] = {0, 2, 4, 1, 3, 5};
nntrainer::Tensor b(nntrainer::TensorDim(1, 3, 2, 1, NHWC_, FP32_), b_data);
float answer_data[] = {10, 13};
nntrainer::Tensor answer(nntrainer::TensorDim(1, 2, 1, 1, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
answer_data);
nntrainer::Tensor ret = a.dot(b, true, true);
EXPECT_EQ(ret, answer);
}
TEST(nntrainer_Tensor, empty_nhwc_02) {
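+ // empty() reflects the dimension, not the allocation, so a lazily allocated tensor is still non-empty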
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), false);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), false);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_FALSE(t.empty());
}
TEST(nntrainer_Tensor, empty_nhwc_03) {
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), true);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), true);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_FALSE(t.empty());
}
TEST(nntrainer_Tensor, fill_p) {
/// same dimension, buffer size
{
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 2, 4, 5, NHWC_);
- nntrainer::Tensor original = randUniform(3, 2, 4, 5, -1.0f, 1.0f, NHWC_);
-=======
nntrainer::Tensor target(3, 2, 4, 5, NHWC_, FP32_);
nntrainer::Tensor original =
  randUniform(3, 2, 4, 5, -1.0f, 1.0f, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
target.fill(original, false);
EXPECT_EQ(target, original);
/// uninitialized target, with the initialize flag set to true
{
nntrainer::Tensor target;
-<<<<<<< HEAD
- nntrainer::Tensor original = randUniform(3, 2, 4, 5, -1.0f, 1.0f, NHWC_);
-=======
nntrainer::Tensor original =
  randUniform(3, 2, 4, 5, -1.0f, 1.0f, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
target.fill(original, true);
EXPECT_EQ(target, original);
TEST(nntrainer_Tensor, fill_uninitialized_n) {
nntrainer::Tensor target;
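+ // fill() into a default-constructed target requires the initialize flag; without it this must throw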
-<<<<<<< HEAD
- nntrainer::Tensor original = randUniform(3, 1, 2, 3, -1.0f, 1.0f, NHWC_);
-=======
nntrainer::Tensor original =
  randUniform(3, 1, 2, 3, -1.0f, 1.0f, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(target.fill(original, false), std::invalid_argument);
}
TEST(nntrainer_Tensor, fill_different_dimension_n) {
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 1, 3, 2, NHWC_);
- nntrainer::Tensor original = randUniform(3, 1, 2, 3, -1.0f, 1.0f, NHWC_);
-=======
nntrainer::Tensor target(3, 1, 3, 2, NHWC_, FP32_);
nntrainer::Tensor original =
  randUniform(3, 1, 2, 3, -1.0f, 1.0f, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(target.fill(original, false), std::invalid_argument);
}
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor result = input.add_strided(input);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
- j * (width * channel) + k * channel + 1);
-
- nntrainer::Tensor test(batch - 1, height - 1, width - 1, channel, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor test(batch - 1, height - 1, width - 1, channel, NHWC_,
                       FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW({ input.add_strided(test); }, std::invalid_argument);
}
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, height, width, channel, NHWC_);
-=======
nntrainer::TensorDim dim(batch, height, width, channel, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim, false);
nntrainer::Tensor test(dim);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, height, width, channel, NHWC_);
-=======
nntrainer::TensorDim dim(batch, height, width, channel, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
nntrainer::Tensor result = input.add_strided(input, 10.0);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(input, i * (batch * height * width) +
j * (height * width) + k + 1);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(input, i * (height * width) + j * (height * width) +
- k * width + l);
-
- nntrainer::Tensor test(batch - 1, height - 1, width - 1, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(input, i * (height * width) + j * (height * width) +
k * width + l);
nntrainer::Tensor test(batch - 1, height - 1, width - 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW({ input.multiply_strided(test); }, std::invalid_argument);
}
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
// input is not allocated now : alloc_now == false
nntrainer::Tensor input(dim, false);
nntrainer::Tensor test(dim);
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::TensorDim dim(batch, channel, height, width, NHWC_);
-=======
nntrainer::TensorDim dim(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor input(dim);
GEN_TEST_INPUT_NHWC(input, i * (height * width * channel) +
int height = 3;
int width = 10;
-<<<<<<< HEAD
- nntrainer::Tensor input(batch, channel, height, width, NHWC_);
- GEN_TEST_INPUT_NHWC(input, i * (batch * height * width) +
- j * (height * width) + k + 1);
-
- nntrainer::Tensor output(batch, channel, height, width, NHWC_);
-=======
nntrainer::Tensor input(batch, channel, height, width, NHWC_, FP32_);
GEN_TEST_INPUT_NHWC(input, i * (batch * height * width) +
j * (height * width) + k + 1);
nntrainer::Tensor output(batch, channel, height, width, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(output, i * (batch * height * width) +
j * (height * width) + k + 1);
}
TEST(nntrainer_Tensor, allocate_02_nhwc_p) {
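+ // constructed with alloc_now == false: memory is not reserved until allocate() is called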
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), false);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), false);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_FALSE(t.isAllocated());
t.allocate();
}
TEST(nntrainer_Tensor, allocate_03_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), true);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), true);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_TRUE(t.isAllocated());
t.allocate();
}
TEST(nntrainer_Tensor, initialize_01_nhwc_p) {
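+ // the ONES initializer applied at construction should match a tensor filled via setValue(1)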
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), true,
- nntrainer::Tensor::Initializer::ONES);
-
- nntrainer::Tensor golden(1, 2, 3, 4, NHWC_);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), true,
nntrainer::Tensor::Initializer::ONES);
nntrainer::Tensor golden(1, 2, 3, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
golden.setValue(1);
EXPECT_EQ(golden, t);
}
TEST(nntrainer_Tensor, initialize_02_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), true);
-
- nntrainer::Tensor golden(1, 2, 3, 4, NHWC_);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), true);
nntrainer::Tensor golden(1, 2, 3, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
golden.setValue(1);
EXPECT_NE(golden, t);
}
TEST(nntrainer_Tensor, initialize_03_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), false,
- nntrainer::Tensor::Initializer::ONES);
- t.allocate();
-
- nntrainer::Tensor golden(1, 2, 3, 4, NHWC_);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), false,
nntrainer::Tensor::Initializer::ONES);
t.allocate();
nntrainer::Tensor golden(1, 2, 3, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
golden.setValue(1);
EXPECT_EQ(golden, t);
}
TEST(nntrainer_Tensor, initialize_04_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), false);
- t.initialize(nntrainer::Tensor::Initializer::ONES);
- t.allocate();
-
- nntrainer::Tensor golden(1, 2, 3, 4, NHWC_);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), false);
t.initialize(nntrainer::Tensor::Initializer::ONES);
t.allocate();
nntrainer::Tensor golden(1, 2, 3, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
golden.setValue(1);
EXPECT_EQ(golden, t);
}
TEST(nntrainer_Tensor, initialize_05_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), false);
- t.allocate();
-
- nntrainer::Tensor golden(1, 2, 3, 4, NHWC_);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), false);
t.allocate();
nntrainer::Tensor golden(1, 2, 3, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
golden.setValue(1.f);
/**
}
TEST(nntrainer_Tensor, initialize_06_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), true,
- nntrainer::Tensor::Initializer::ONES);
- nntrainer::Tensor golden(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), true,
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), true,
nntrainer::Tensor::Initializer::ONES);
nntrainer::Tensor golden(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), true,
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor::Initializer::ZEROS);
EXPECT_NE(golden, t);
}
TEST(nntrainer_Tensor, initialize_07_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), true,
- nntrainer::Tensor::Initializer::ONES);
-
- nntrainer::Tensor golden(1, 2, 3, 4, NHWC_);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), true,
nntrainer::Tensor::Initializer::ONES);
nntrainer::Tensor golden(1, 2, 3, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
golden.setValue(1);
EXPECT_EQ(golden, t);
}
TEST(nntrainer_Tensor, initialize_08_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_), true,
- nntrainer::Tensor::Initializer::ONES);
-
- nntrainer::Tensor golden(1, 2, 3, 4, NHWC_);
-=======
nntrainer::Tensor t(nntrainer::TensorDim(1, 2, 3, 4, NHWC_, FP32_), true,
nntrainer::Tensor::Initializer::ONES);
nntrainer::Tensor golden(1, 2, 3, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
golden.setValue(1);
EXPECT_EQ(golden, t);
}
TEST(nntrainer_Tensor, reshape_01_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6, NHWC_);
-
- EXPECT_THROW(A.reshape(nntrainer::TensorDim(9, 9, 9, 9, NHWC_)),
-=======
nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6, NHWC_, FP32_);
EXPECT_THROW(A.reshape(nntrainer::TensorDim(9, 9, 9, 9, NHWC_, FP32_)),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::invalid_argument);
}
TEST(nntrainer_Tensor, reshape_02_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6, NHWC_);
-=======
nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::TensorDim A_dim = A.getDim();
/** Changing the dim of a tensor only affects local copy of the dim */
int channel = 4;
int height = 5;
int width = 6;
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 4, 5, 6, NHWC_);
- nntrainer::Tensor readed(3, 4, 5, 6, NHWC_);
-=======
nntrainer::Tensor target(3, 4, 5, 6, NHWC_, FP32_);
nntrainer::Tensor readed(3, 4, 5, 6, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(target, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
int channel = 4;
int height = 5;
int width = 6;
-<<<<<<< HEAD
- nntrainer::Tensor target(3, 4, 5, 6, NHWC_);
- nntrainer::Tensor readed(3, 4, 1, 1, NHWC_);
-=======
nntrainer::Tensor target(3, 4, 5, 6, NHWC_, FP32_);
nntrainer::Tensor readed(3, 4, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
GEN_TEST_INPUT_NHWC(target, i * (height * width * channel) +
j * (width * channel) + k * channel + 1);
}
TEST(nntrainer_Tensor, set_01_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor tensor = nntrainer::Tensor(1, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor tensor = nntrainer::Tensor(1, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
tensor.setZero();
EXPECT_EQ(tensor.getValue(0, 0, 0, 0), 0.0);
}
TEST(nntrainer_Tensor, print_small_size_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor target = constant(1.0, 3, 3, 1, 2, NHWC_);
-=======
nntrainer::Tensor target = constant(1.0, 3, 3, 1, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::stringstream ss, expected;
ss << target;
expected << '<' << typeid(target).name() << " at " << &target << ">\n"
<< "data addr: " << target.getData() << '\n'
- << "Shape: 3:3:1:2\n"
+ << "Shape: 3:3:1:2 [ FP32 : NHWC ]\n"
<< " 1 1 1 \n"
<< " 1 1 1 \n"
<< "\n"
}
TEST(nntrainer_Tensor, copy_and_reshape_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6, NHWC_);
- nntrainer::Tensor B = A;
- nntrainer::Tensor C = A.clone();
-
- EXPECT_THROW(B.reshape(nntrainer::TensorDim(9, 9, 9, 9, NHWC_)),
-=======
nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6, NHWC_, FP32_);
nntrainer::Tensor B = A;
nntrainer::Tensor C = A.clone();
EXPECT_THROW(B.reshape(nntrainer::TensorDim(9, 9, 9, 9, NHWC_, FP32_)),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::invalid_argument);
}
TEST(nntrainer_Tensor, copy_and_shares_variable_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6, NHWC_);
-=======
nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor B = A.clone();
nntrainer::Tensor C = A;
EXPECT_EQ(A, C);
EXPECT_NE(B, C);
-<<<<<<< HEAD
- C.reshape(nntrainer::TensorDim(3, 4, 6, 5, NHWC_));
-=======
C.reshape(nntrainer::TensorDim(3, 4, 6, 5, NHWC_, FP32_));
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_EQ(A.getDim(), B.getDim());
EXPECT_NE(A.getDim(), C.getDim());
}
{
std::vector<nntrainer::Tensor> inputs;
inputs.reserve(2);
-<<<<<<< HEAD
- inputs.emplace_back(ranged(2, 2, 1, 1, NHWC_));
- inputs.emplace_back(ranged(2, 2, 2, 1, NHWC_));
- float answer_data[] = {0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 6, 7};
- nntrainer::Tensor answer(ml::train::TensorDim({2, 2, 3, 1}, NHWC_),
-=======
inputs.emplace_back(ranged(2, 2, 1, 1, NHWC_, FP32_));
inputs.emplace_back(ranged(2, 2, 2, 1, NHWC_, FP32_));
float answer_data[] = {0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 6, 7};
- nntrainer::Tensor answer(ml::train::TensorDim({2, 2, 3, 1}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({2, 2, 3, 1}, {NHWC_, FP32_}),
answer_data);
EXPECT_EQ(nntrainer::Tensor::cat(inputs, 2), answer);
}
{
std::vector<nntrainer::Tensor> inputs;
inputs.reserve(2);
-<<<<<<< HEAD
- inputs.emplace_back(ranged(3, 2, 4, 5, NHWC_));
- inputs.emplace_back(ranged(2, 2, 4, 5, NHWC_));
-=======
inputs.emplace_back(ranged(3, 2, 4, 5, NHWC_, FP32_));
inputs.emplace_back(ranged(2, 2, 4, 5, NHWC_, FP32_));
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
75, 76, 77, 78, 79};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({5, 2, 4, 5}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({5, 2, 4, 5}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({5, 2, 4, 5}, {NHWC_, FP32_}),
answer_data);
EXPECT_EQ(nntrainer::Tensor::cat(inputs, 0), answer);
}
{
std::vector<nntrainer::Tensor> inputs;
inputs.reserve(2);
-<<<<<<< HEAD
- inputs.emplace_back(ranged(3, 5, 3, 4, NHWC_));
- inputs.emplace_back(ranged(3, 5, 2, 4, NHWC_));
-=======
inputs.emplace_back(ranged(3, 5, 3, 4, NHWC_, FP32_));
inputs.emplace_back(ranged(3, 5, 2, 4, NHWC_, FP32_));
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
114, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 5, 4}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 5, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 5, 4}, {NHWC_, FP32_}),
answer_data);
EXPECT_EQ(nntrainer::Tensor::cat(inputs, 2), answer);
}
{
std::vector<nntrainer::Tensor> inputs;
inputs.reserve(2);
-<<<<<<< HEAD
- inputs.emplace_back(ranged(3, 5, 2, 1, NHWC_));
- inputs.emplace_back(ranged(3, 5, 2, 2, NHWC_));
-=======
inputs.emplace_back(ranged(3, 5, 2, 1, NHWC_, FP32_));
inputs.emplace_back(ranged(3, 5, 2, 2, NHWC_, FP32_));
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 15, 16, 17, 18, 19, 30, 31, 32, 33,
34, 35, 36, 37, 38, 39, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44, 45, 46,
47, 48, 49, 25, 26, 27, 28, 29, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 2, 3}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 2, 3}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 2, 3}, {NHWC_, FP32_}),
answer_data);
EXPECT_EQ(nntrainer::Tensor::cat(inputs, 3), answer);
}
{
std::vector<nntrainer::Tensor> inputs;
inputs.reserve(3);
-<<<<<<< HEAD
- inputs.emplace_back(ranged(3, 1, 2, 4, NHWC_));
- inputs.emplace_back(ranged(3, 3, 2, 4, NHWC_));
- inputs.emplace_back(ranged(3, 2, 2, 4, NHWC_));
-=======
inputs.emplace_back(ranged(3, 1, 2, 4, NHWC_, FP32_));
inputs.emplace_back(ranged(3, 3, 2, 4, NHWC_, FP32_));
inputs.emplace_back(ranged(3, 2, 2, 4, NHWC_, FP32_));
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 0, 1, 2, 0, 1, 1, 3, 4, 5, 2, 3, 2, 6, 7, 8, 4, 5,
3, 9, 10, 11, 6, 7, 4, 12, 13, 14, 8, 9, 5, 15, 16, 17, 10, 11,
15, 45, 46, 47, 30, 31, 16, 48, 49, 50, 32, 33, 17, 51, 52, 53, 34, 35,
18, 54, 55, 56, 36, 37, 19, 57, 58, 59, 38, 39, 20, 60, 61, 62, 40, 41,
21, 63, 64, 65, 42, 43, 22, 66, 67, 68, 44, 45, 23, 69, 70, 71, 46, 47};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 6, 2, 4}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 6, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 6, 2, 4}, {NHWC_, FP32_}),
answer_data);
EXPECT_EQ(nntrainer::Tensor::cat(inputs, 1), answer);
}
{
std::vector<nntrainer::Tensor> inputs;
inputs.reserve(2);
-<<<<<<< HEAD
- inputs.emplace_back(nntrainer::Tensor(2, 1, 1, 2, NHWC_));
- inputs.emplace_back(nntrainer::Tensor(2, 2, 1, 2, NHWC_));
-=======
inputs.emplace_back(nntrainer::Tensor(2, 1, 1, 2, NHWC_, FP32_));
inputs.emplace_back(nntrainer::Tensor(2, 2, 1, 2, NHWC_, FP32_));
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(nntrainer::Tensor::cat(inputs, 2), std::invalid_argument);
}
}
}
TEST(nntrainer_Tensor, TensorPaddedValue_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::Tensor a = ranged(1, 1, 3, 3, NHWC_);
-=======
nntrainer::Tensor a = ranged(1, 1, 3, 3, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float default_padded = -1;
for (int i = 0; i < 5; ++i) {
TEST(nntrainer_Tensor, zoneout_mask_01_nhwc_n) {
const float zoneout_rate = 0.3f;
-<<<<<<< HEAD
- nntrainer::Tensor t(10, 10, 10, 10, NHWC_);
- nntrainer::Tensor opposite(20, 20, 20, 20, NHWC_);
-=======
nntrainer::Tensor t(10, 10, 10, 10, NHWC_, FP32_);
nntrainer::Tensor opposite(20, 20, 20, 20, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(t.zoneout_mask(opposite, zoneout_rate), std::invalid_argument);
}
TEST(nntrainer_Tensor, zoneout_mask_02_nhwc_p) {
const float zoneout_rate = 0.3f;
-<<<<<<< HEAD
- nntrainer::Tensor t(10, 10, 10, 10, NHWC_);
-=======
nntrainer::Tensor t(10, 10, 10, 10, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor opposite = t.zoneout_mask(zoneout_rate);
constexpr float epsilon = 1e-3;
TEST(nntrainer_Tensor, zoneout_mask_03_nhwc_p) {
const float zoneout_rate = 0.3f;
-<<<<<<< HEAD
- nntrainer::Tensor t(10, 10, 100, 100, NHWC_);
-=======
nntrainer::Tensor t(10, 10, 100, 100, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor opposite = t.zoneout_mask(zoneout_rate);
constexpr float epsilon = 1e-3;
TEST(nntrainer_Tensor, zoneout_mask_04_nhwc_n) {
const float zoneout_rate = 0.3f;
-<<<<<<< HEAD
- nntrainer::Tensor t(10, 10, 100, 100, NHWC_);
-=======
nntrainer::Tensor t(10, 10, 100, 100, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
nntrainer::Tensor opposite = t.zoneout_mask(zoneout_rate);
constexpr float epsilon = 1e-3;
TEST(nntrainer_Tensor, split_01_nhwc_p) {
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(3);
{
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split(3, 0), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(2);
{
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 80, 81, 82, 83, 84, 85, 86, 87,
88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, {NHWC_, FP32_}),
answer_data);
}
{
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split(2, 2), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(2);
{
25, 26, 27, 28, 29, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, {NHWC_, FP32_}),
answer_data);
}
{
35, 36, 37, 38, 39, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 90, 91, 92, 93, 94,
95, 96, 97, 98, 99, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split(2, 3), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(5);
{
float answer_data[] = {0, 5, 10, 15, 20, 25, 30, 35,
40, 45, 50, 55, 60, 65, 70, 75,
80, 85, 90, 95, 100, 105, 110, 115};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
float answer_data[] = {1, 6, 11, 16, 21, 26, 31, 36,
41, 46, 51, 56, 61, 66, 71, 76,
81, 86, 91, 96, 101, 106, 111, 116};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
float answer_data[] = {2, 7, 12, 17, 22, 27, 32, 37,
42, 47, 52, 57, 62, 67, 72, 77,
82, 87, 92, 97, 102, 107, 112, 117};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
float answer_data[] = {3, 8, 13, 18, 23, 28, 33, 38,
43, 48, 53, 58, 63, 68, 73, 78,
83, 88, 93, 98, 103, 108, 113, 118};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
float answer_data[] = {4, 9, 14, 19, 24, 29, 34, 39,
44, 49, 54, 59, 64, 69, 74, 79,
84, 89, 94, 99, 104, 109, 114, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split(5, 1), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(1, 6, 1, 4, NHWC_);
- nntrainer::Tensor t = ranged(1, 6, 1, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(1, 6, 1, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(1, 6, 1, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(2);
{
float answer_data[] = {0, 1, 2, 6, 7, 8, 12, 13, 14, 18, 19, 20};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({1, 3, 1, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({1, 3, 1, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({1, 3, 1, 4}, {NHWC_, FP32_}),
answer_data);
}
{
float answer_data[] = {3, 4, 5, 9, 10, 11, 15, 16, 17, 21, 22, 23};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({1, 3, 1, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({1, 3, 1, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({1, 3, 1, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split(2, 1), answer);
}
TEST(nntrainer_Tensor, split_02_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t(1, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor t(1, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(t.split(0, 0), std::invalid_argument);
}
TEST(nntrainer_Tensor, split_03_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t(3, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor t(3, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(t.split(2, 0), std::invalid_argument);
}
TEST(nntrainer_Tensor, split_04_nhwc_p) {
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(2);
{
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({2, 5, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({2, 5, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({2, 5, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({1, 5, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split({2, 1}, 0), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(2);
{
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 80, 81, 82, 83, 84, 85, 86, 87,
88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, {NHWC_, FP32_}),
answer_data);
}
{
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 5, 1, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split({1, 1}, 2), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(2);
{
25, 26, 27, 28, 29, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, {NHWC_, FP32_}),
answer_data);
}
{
35, 36, 37, 38, 39, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 90, 91, 92, 93, 94,
95, 96, 97, 98, 99, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 5, 2, 2}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split({2, 2}, 3), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(3);
{
float answer_data[] = {0, 5, 10, 15, 20, 25, 30, 35,
40, 45, 50, 55, 60, 65, 70, 75,
80, 85, 90, 95, 100, 105, 110, 115};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
51, 52, 53, 56, 57, 58, 61, 62, 63, 66, 67, 68, 71, 72, 73,
76, 77, 78, 81, 82, 83, 86, 87, 88, 91, 92, 93, 96, 97, 98,
101, 102, 103, 106, 107, 108, 111, 112, 113, 116, 117, 118};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 3, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 3, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 3, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
float answer_data[] = {4, 9, 14, 19, 24, 29, 34, 39,
44, 49, 54, 59, 64, 69, 74, 79,
84, 89, 94, 99, 104, 109, 114, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split({1, 3, 1}, 1), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(3);
{
0, 1, 5, 6, 10, 11, 15, 16, 20, 21, 25, 26, 30, 31, 35, 36,
40, 41, 45, 46, 50, 51, 55, 56, 60, 61, 65, 66, 70, 71, 75, 76,
80, 81, 85, 86, 90, 91, 95, 96, 100, 101, 105, 106, 110, 111, 115, 116};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 2, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 2, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 2, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
2, 3, 7, 8, 12, 13, 17, 18, 22, 23, 27, 28, 32, 33, 37, 38,
42, 43, 47, 48, 52, 53, 57, 58, 62, 63, 67, 68, 72, 73, 77, 78,
82, 83, 87, 88, 92, 93, 97, 98, 102, 103, 107, 108, 112, 113, 117, 118};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 2, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 2, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 2, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
float answer_data[] = {4, 9, 14, 19, 24, 29, 34, 39,
44, 49, 54, 59, 64, 69, 74, 79,
84, 89, 94, 99, 104, 109, 114, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 1, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split({2, 2, 1}, 1), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(2);
{
0, 1, 5, 6, 10, 11, 15, 16, 20, 21, 25, 26, 30, 31, 35, 36,
40, 41, 45, 46, 50, 51, 55, 56, 60, 61, 65, 66, 70, 71, 75, 76,
80, 81, 85, 86, 90, 91, 95, 96, 100, 101, 105, 106, 110, 111, 115, 116};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 2, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 2, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 2, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
{
52, 53, 54, 57, 58, 59, 62, 63, 64, 67, 68, 69, 72, 73, 74,
77, 78, 79, 82, 83, 84, 87, 88, 89, 92, 93, 94, 97, 98, 99,
102, 103, 104, 107, 108, 109, 112, 113, 114, 117, 118, 119};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({3, 3, 2, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({3, 3, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({3, 3, 2, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split({2, 3}, 1), answer);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(1, 6, 1, 4, NHWC_);
- nntrainer::Tensor t = ranged(1, 6, 1, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(1, 6, 1, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(1, 6, 1, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
std::vector<nntrainer::Tensor> answer;
answer.reserve(3);
{
float answer_data[] = {0, 6, 12, 18};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({1, 1, 1, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({1, 1, 1, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({1, 1, 1, 4}, {NHWC_, FP32_}),
answer_data);
}
{
float answer_data[] = {1, 2, 3, 7, 8, 9, 13, 14, 15, 19, 20, 21};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({1, 3, 1, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({1, 3, 1, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({1, 3, 1, 4}, {NHWC_, FP32_}),
answer_data);
}
{
float answer_data[] = {4, 5, 10, 11, 16, 17, 22, 23};
-<<<<<<< HEAD
- answer.emplace_back(ml::train::TensorDim({1, 2, 1, 4}, NHWC_),
-=======
- answer.emplace_back(ml::train::TensorDim({1, 2, 1, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ answer.emplace_back(ml::train::TensorDim({1, 2, 1, 4}, {NHWC_, FP32_}),
answer_data);
}
EXPECT_EQ(t.split({1, 3, 2}, 1), answer);
}
TEST(nntrainer_Tensor, split_05_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t(3, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor t(3, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(t.split({1, 1}, 0), std::invalid_argument);
}
TEST(nntrainer_Tensor, split_06_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t(3, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor t(3, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(t.split({2, 0, 1}, 0), std::invalid_argument);
}
TEST(nntrainer_Tensor, split_07_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor t(3, 1, 1, 1, NHWC_);
-=======
nntrainer::Tensor t(3, 1, 1, 1, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(t.split({}, 0), std::invalid_argument);
}
TEST(nntrainer_Tensor, transpose_nhwc_p) {
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
-
- /// plain transpose
- {
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
/// plain transpose
{
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 2, 4}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 2, 4}, {NHWC_, FP32_}),
answer_data);
nntrainer::Tensor m = t.transpose("0:1:2");
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 3, 8,
13, 18, 4, 9, 14, 19, 20, 25, 30, 35, 21, 26, 31, 36,
81, 86, 91, 96, 82, 87, 92, 97, 83, 88, 93, 98, 84, 89,
94, 99, 100, 105, 110, 115, 101, 106, 111, 116, 102, 107, 112, 117,
103, 108, 113, 118, 104, 109, 114, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 2, 5}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 2, 5}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 2, 5}, {NHWC_, FP32_}),
answer_data);
nntrainer::Tensor m = t.transpose("2:1:0");
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 5, 6, 7, 8,
9, 25, 26, 27, 28, 29, 10, 11, 12, 13, 14, 30, 31, 32,
84, 100, 101, 102, 103, 104, 85, 86, 87, 88, 89, 105, 106, 107,
108, 109, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114, 95, 96,
97, 98, 99, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 4, 2}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 4, 2}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 4, 2}, {NHWC_, FP32_}),
answer_data);
nntrainer::Tensor m = t.transpose("0:2:1");
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 20, 1, 21, 2, 22, 3, 23, 4, 24, 5, 25, 6, 26, 7, 27,
8, 28, 9, 29, 10, 30, 11, 31, 12, 32, 13, 33, 14, 34, 15, 35,
80, 100, 81, 101, 82, 102, 83, 103, 84, 104, 85, 105, 86, 106, 87, 107,
88, 108, 89, 109, 90, 110, 91, 111, 92, 112, 93, 113, 94, 114, 95, 115,
96, 116, 97, 117, 98, 118, 99, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 4, 5}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 4, 5}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 4, 5}, {NHWC_, FP32_}),
answer_data);
nntrainer::Tensor m = t.transpose("1:2:0");
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 5, 10, 15, 20, 25, 30, 35, 1, 6, 11, 16, 21, 26, 31,
36, 2, 7, 12, 17, 22, 27, 32, 37, 3, 8, 13, 18, 23, 28,
59, 64, 69, 74, 79, 80, 85, 90, 95, 100, 105, 110, 115, 81, 86,
91, 96, 101, 106, 111, 116, 82, 87, 92, 97, 102, 107, 112, 117, 83,
88, 93, 98, 103, 108, 113, 118, 84, 89, 94, 99, 104, 109, 114, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 5, 2}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 5, 2}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 5, 2}, {NHWC_, FP32_}),
answer_data);
nntrainer::Tensor m = t.transpose("2:0:1");
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 20, 5, 25, 10, 30, 15, 35, 1, 21, 6, 26, 11, 31, 16, 36,
2, 22, 7, 27, 12, 32, 17, 37, 3, 23, 8, 28, 13, 33, 18, 38,
80, 100, 85, 105, 90, 110, 95, 115, 81, 101, 86, 106, 91, 111, 96, 116,
82, 102, 87, 107, 92, 112, 97, 117, 83, 103, 88, 108, 93, 113, 98, 118,
84, 104, 89, 109, 94, 114, 99, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 5, 4}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 5, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 5, 4}, {NHWC_, FP32_}),
answer_data);
nntrainer::Tensor m = t.transpose("1:0:2");
EXPECT_EQ(answer, m);
/// outplace transpose
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor m = ranged(3, 5, 2, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 5, 2, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 2, 4}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 2, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 2, 4}, {NHWC_, FP32_}),
answer_data);
t.transpose("0:1:2", m);
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor m = ranged(3, 4, 2, 5, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 4, 2, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 3, 8,
13, 18, 4, 9, 14, 19, 20, 25, 30, 35, 21, 26, 31, 36,
81, 86, 91, 96, 82, 87, 92, 97, 83, 88, 93, 98, 84, 89,
94, 99, 100, 105, 110, 115, 101, 106, 111, 116, 102, 107, 112, 117,
103, 108, 113, 118, 104, 109, 114, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 2, 5}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 2, 5}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 2, 5}, {NHWC_, FP32_}),
answer_data);
t.transpose("2:1:0", m);
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor m = ranged(3, 5, 4, 2, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 5, 4, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 5, 6, 7, 8,
9, 25, 26, 27, 28, 29, 10, 11, 12, 13, 14, 30, 31, 32,
84, 100, 101, 102, 103, 104, 85, 86, 87, 88, 89, 105, 106, 107,
108, 109, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114, 95, 96,
97, 98, 99, 115, 116, 117, 118, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 4, 2}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 4, 2}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 5, 4, 2}, {NHWC_, FP32_}),
answer_data);
t.transpose("0:2:1", m);
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor m = ranged(3, 2, 4, 5, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 2, 4, 5, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 20, 1, 21, 2, 22, 3, 23, 4, 24, 5, 25, 6, 26, 7, 27,
8, 28, 9, 29, 10, 30, 11, 31, 12, 32, 13, 33, 14, 34, 15, 35,
80, 100, 81, 101, 82, 102, 83, 103, 84, 104, 85, 105, 86, 106, 87, 107,
88, 108, 89, 109, 90, 110, 91, 111, 92, 112, 93, 113, 94, 114, 95, 115,
96, 116, 97, 117, 98, 118, 99, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 4, 5}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 4, 5}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 4, 5}, {NHWC_, FP32_}),
answer_data);
t.transpose("1:2:0", m);
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor m = ranged(3, 4, 5, 2, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 4, 5, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 5, 10, 15, 20, 25, 30, 35, 1, 6, 11, 16, 21, 26, 31,
36, 2, 7, 12, 17, 22, 27, 32, 37, 3, 8, 13, 18, 23, 28,
59, 64, 69, 74, 79, 80, 85, 90, 95, 100, 105, 110, 115, 81, 86,
91, 96, 101, 106, 111, 116, 82, 87, 92, 97, 102, 107, 112, 117, 83,
88, 93, 98, 103, 108, 113, 118, 84, 89, 94, 99, 104, 109, 114, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 5, 2}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 5, 2}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 4, 5, 2}, {NHWC_, FP32_}),
answer_data);
t.transpose("2:0:1", m);
EXPECT_EQ(answer, m);
}
{
-<<<<<<< HEAD
- nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor m = ranged(3, 2, 5, 4, NHWC_);
-=======
nntrainer::TensorDim ref_dim(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor t = ranged(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor m = ranged(3, 2, 5, 4, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
float answer_data[] = {
0, 20, 5, 25, 10, 30, 15, 35, 1, 21, 6, 26, 11, 31, 16, 36,
2, 22, 7, 27, 12, 32, 17, 37, 3, 23, 8, 28, 13, 33, 18, 38,
80, 100, 85, 105, 90, 110, 95, 115, 81, 101, 86, 106, 91, 111, 96, 116,
82, 102, 87, 107, 92, 112, 97, 117, 83, 103, 88, 108, 93, 113, 98, 118,
84, 104, 89, 109, 94, 114, 99, 119};
-<<<<<<< HEAD
- nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 5, 4}, NHWC_),
-=======
- nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 5, 4}, NHWC_, FP32_),
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
+ nntrainer::Tensor answer(ml::train::TensorDim({3, 2, 5, 4}, {NHWC_, FP32_}),
answer_data);
t.transpose("1:0:2", m);
EXPECT_EQ(answer, m);
}
TEST(nntrainer_Tensor, tranpose_dimension_not_match_nhwc_n) {
-<<<<<<< HEAD
- nntrainer::Tensor a(3, 5, 2, 4, NHWC_);
- nntrainer::Tensor b(3, 3, 1, 2, NHWC_);
-=======
nntrainer::Tensor a(3, 5, 2, 4, NHWC_, FP32_);
nntrainer::Tensor b(3, 3, 1, 2, NHWC_, FP32_);
->>>>>>> 4bea9510... [ Tensor ] Support NHWC for dot, add/multiply_strided and other ops
EXPECT_THROW(a.transpose("0:1:2", b), std::invalid_argument);
}