return result;
}
-Tensor Tensor::add(Tensor const &m) const {
+/**
+ * @brief Add Tensor element by element without memory copy (in-place)
+ * @param[in] m Tensor to be added
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter
+ */
+int Tensor::add_i(Tensor const &m) {
if ((dim.height() != m.dim.height()) || (dim.width() != m.dim.width())) {
- throw std::runtime_error("Error: Dimension must be equal each other");
+ return ML_ERROR_INVALID_PARAMETER;
}
- Tensor result(dim);
#ifdef USE_BLAS
- cblas_scopy(dim.getDataLen(), this->data.data(), 1, result.data.data(), 1);
- unsigned int size = dim.channel() * dim.width() * dim.height();
-
+ unsigned int size = dim.width() * dim.height() * dim.channel();
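+ /* if m has a single batch, broadcast-add it to every batch of this tensor */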
if (m.dim.batch() == 1) {
for (unsigned int k = 0; k < dim.batch(); ++k) {
- cblas_saxpy(size, 1.0, m.data.data(), 1, &(result.data.data()[k * size]),
+ cblas_saxpy(size, 1.0, m.data.data(), 1, &(this->data.data()[k * size]),
1);
}
} else {
- cblas_saxpy(dim.getDataLen(), 1.0, m.data.data(), 1, result.data.data(), 1);
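+ /* batch sizes match: add the whole buffer with a single saxpy */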
+ cblas_saxpy(dim.getDataLen(), 1.0, m.data.data(), 1, this->data.data(), 1);
}
#else
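+ /* plain loop fallback when BLAS is not available */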
unsigned int i, j, k;
if (m.dim.batch() == 1) {
for (k = 0; k < dim.batch(); ++k) {
for (i = 0; i < m.dim.getFeatureLen(); ++i) {
j = k * m.dim.getFeatureLen();
- result.data[j + i] = data[j + i] + m.data[i];
+ this->data[j + i] += m.data[i];
}
}
} else {
for (k = 0; k < dim.getDataLen(); ++k) {
- result.data[k] = data[k] + m.data[k];
+ this->data[k] += m.data[k];
}
}
#endif
+ return ML_ERROR_NONE;
+}
+
+Tensor Tensor::add(Tensor const &m) const {
+ if ((dim.height() != m.dim.height()) || (dim.width() != m.dim.width())) {
+ throw std::runtime_error("Error: Dimension must be equal each other");
+ }
+
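+ /* delegate to the in-place version: copy this tensor into result, then add m */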
+ Tensor result(dim);
+ result.copy(*this);
+ result.add_i(m);
+
return result;
}
dim.height(from.dim.height());
dim.width(from.dim.width());
dim.batch(from.dim.batch());
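+ /* allocate the destination buffer when copying into an empty tensor */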
+ if (this->data.empty()) {
+ this->data.resize(from.data.size());
+ }
#ifdef USE_BLAS
cblas_scopy(dim.getDataLen(), from.data.data(), 1, this->data.data(), 1);
#else
"Error: Dimension must be equal each other");
}
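+
+/**
+ * @brief In-place self-addition should double every element
+ */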
+TEST(nntrainer_Tensor, add_i_02_p) {
+ int status = ML_ERROR_NONE;
+ int batch = 3;
+ int height = 3;
+ int width = 10;
+ int channel = 1;
+
+ nntrainer::Tensor target(batch, channel, height, width);
+ GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1);
+
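+ /* snapshot the original values to verify the in-place result against */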
+ nntrainer::Tensor original(batch, channel, height, width);
+ original.copy(target);
+
+ status = target.add_i(target);
+ EXPECT_EQ(status, ML_ERROR_NONE);
+
+ float *previous = original.getData();
+ ASSERT_NE(nullptr, previous);
+ float *data = target.getData();
+ ASSERT_NE(nullptr, data);
+
+ for (int i = 0; i < batch * channel * height * width; ++i) {
+ EXPECT_FLOAT_EQ(data[i], previous[i] + previous[i]);
+ }
+}
+
+/**
+ * @brief Operand dimensions do not match (negative case)
+ */
+TEST(nntrainer_Tensor, add_i_01_n) {
+ int status = ML_ERROR_NONE;
+ int batch = 3;
+ int height = 3;
+ int width = 10;
+ int channel = 1;
+
+ nntrainer::Tensor target(batch, channel, height, width);
+ GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1);
+
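+ /* target2 dimensions deliberately differ from target to trigger the error */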
+ nntrainer::Tensor target2(batch, height - 2, width - 3);
+
+ status = target.add_i(target2);
+ EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+}
+
TEST(nntrainer_Tensor, add_01_p) {
int status = ML_ERROR_NONE;
int batch = 3;