This PR introduces a new tensor class, CharTensor, designed to handle signed 8-bit integer (QINT8) data that has already been quantized.
The new class gives users more options when choosing a tensor data type.
CharTensor does not yet support mathematical operations such as multiplication or addition; these will be added in future updates.
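As a quick illustration, here is a minimal usage sketch based on the constructors and accessors added in this PR (the data values are hypothetical):

```cpp
#include <cstdint>
#include <vector>

#include <tensor.h>

int8_t example() {
  // pre-quantized int8 data: batch=1, channel=1, height=1, width=3
  std::vector<std::vector<std::vector<std::vector<int8_t>>>> in = {
    {{{-128, 0, 127}}}};

  nntrainer::Tensor t(
    in, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
  return t.getValue<int8_t>(0, 0, 0, 2); // 127
}
```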
**Self-evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped
Signed-off-by: Donghyeon Jeong <dhyeon.jeong@samsung.com>
/usr/include/nntrainer/memory_data.h
/usr/include/nntrainer/tensor.h
/usr/include/nntrainer/tensor_base.h
+/usr/include/nntrainer/char_tensor.h
/usr/include/nntrainer/float_tensor.h
/usr/include/nntrainer/tensor_wrap_specs.h
/usr/include/nntrainer/blas_interface.h
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * @file char_tensor.cpp
+ * @date 02 April 2024
+ * @brief This is CharTensor class for 8-bit integer calculation
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Donghyeon Jeong <dhyeon.jeong@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <iomanip>
+#include <iostream>
+
+#include <blas_interface.h>
+#include <char_tensor.h>
+#include <tensor.h>
+
+namespace nntrainer {
+
+CharTensor::CharTensor(std::string name_, Tformat fm) :
+ TensorBase(name_, fm, Tdatatype::QINT8) {}
+
+CharTensor::CharTensor(const TensorDim &d, bool alloc_now, Initializer init,
+ std::string name) :
+ TensorBase(d, alloc_now, init, name) {
+ if (alloc_now)
+ allocate();
+}
+
+CharTensor::CharTensor(const TensorDim &d, const void *buf) :
+ CharTensor(d, true) {
+ if (d.getDataLen() != 0) {
+ if (buf != nullptr)
+ copy(buf);
+ }
+}
+
+CharTensor::CharTensor(
+ std::vector<std::vector<std::vector<std::vector<int8_t>>>> const &d,
+ Tformat fm) {
+ if (d.empty() || d[0].empty() || d[0][0].empty() || d[0][0][0].empty()) {
+ throw std::out_of_range(
+ "[Tensor] trying to initialize CharTensor from empty vector");
+ }
+
+ dim.setTensorDim(0, d.size());
+ if (fm == Tformat::NCHW) {
+ dim.setTensorDim(1, d[0].size());
+ dim.setTensorDim(2, d[0][0].size());
+ dim.setTensorDim(3, d[0][0][0].size());
+ } else {
+ dim.setTensorDim(2, d[0].size());
+ dim.setTensorDim(3, d[0][0].size());
+ dim.setTensorDim(1, d[0][0][0].size());
+ }
+
+ dim.setTensorType({fm, Tdatatype::QINT8});
+
+ strides = dim.computeStrides();
+ contiguous = true;
+ initializer = Initializer::NONE;
+
+ MemoryData *mem_data =
+ new MemoryData((void *)(new int8_t[dim.getDataLen()]()));
+  data = std::shared_ptr<MemoryData>(mem_data, [](MemoryData *mem_data) {
+    delete[] mem_data->getAddr<int8_t>();
+    delete mem_data; // free the MemoryData object as well as its buffer
+  });
+
+ offset = 0;
+
+  // the nested input vector d is ordered [batch][channel][height][width] for
+  // NCHW and [batch][height][width][channel] for NHWC, while setValue()
+  // always takes (batch, channel, height, width)
+ if (fm == Tformat::NCHW) {
+ for (unsigned int i = 0; i < batch(); ++i)
+ for (unsigned int j = 0; j < channel(); ++j)
+ for (unsigned int k = 0; k < height(); ++k)
+ for (unsigned int l = 0; l < width(); ++l)
+ this->setValue(i, j, k, l, d[i][j][k][l]);
+ } else {
+ for (unsigned int i = 0; i < batch(); ++i)
+ for (unsigned int j = 0; j < height(); ++j)
+ for (unsigned int k = 0; k < width(); ++k)
+ for (unsigned int l = 0; l < channel(); ++l)
+ this->setValue(i, l, j, k, d[i][j][k][l]);
+ }
+}
+
+bool CharTensor::operator==(const CharTensor &rhs) const {
+ const int8_t *_data = (int8_t *)getData();
+ const int8_t *_rdata = (int8_t *)rhs.getData();
+ for (size_t i = 0; i < size(); ++i) {
+ if (_data[i] != _rdata[i])
+ return false;
+ }
+
+ return true;
+}
+
+void CharTensor::allocate() {
+ if (empty() || data)
+ return;
+
+ if (src_tensor) {
+ /// allocate data based on the source tensor
+ allocateSrcTensor();
+ /** as this memory is shared, do NOT initialize */
+ } else {
+ /// allocate new memory for the tensor data
+ MemoryData *mem_data;
+
+ mem_data = new MemoryData((void *)(new int8_t[dim.getDataLen()]{}));
+ data = std::shared_ptr<MemoryData>(mem_data, [](auto *mem_data) {
+ delete[] mem_data->template getAddr<int8_t>();
+ delete mem_data;
+ });
+
+ offset = 0;
+ initialize();
+ }
+}
+
+void CharTensor::deallocate() {
+ data = nullptr;
+ offset = 0;
+}
+
+void *CharTensor::getData() const {
+ if (!data)
+ return nullptr;
+
+ data->validate();
+ return data->getAddr<int8_t>() + offset;
+}
+
+void *CharTensor::getData(size_t idx) const {
+ if (!data)
+ return nullptr;
+
+ data->validate();
+ return data->getAddr<int8_t>() + offset + idx;
+}
+
+void *CharTensor::getAddress(unsigned int i) {
+ size_t index = getIndex(batch(), channel(), height(), width());
+ if (i > index) {
+ return nullptr;
+ }
+ return &((int8_t *)getData())[i];
+}
+
+const void *CharTensor::getAddress(unsigned int i) const {
+ size_t index = getIndex(batch(), channel(), height(), width());
+ if (i > index) {
+ return nullptr;
+ }
+ return &((int8_t *)getData())[i];
+}
+
+const int8_t &CharTensor::getValue(unsigned int i) const {
+ return ((int8_t *)getData())[i];
+}
+
+int8_t &CharTensor::getValue(unsigned int i) {
+ return ((int8_t *)getData())[i];
+}
+
+const int8_t &CharTensor::getValue(unsigned int b, unsigned int c,
+ unsigned int h, unsigned int w) const {
+ return getValue(getIndex(b, c, h, w));
+}
+
+int8_t &CharTensor::getValue(unsigned int b, unsigned int c, unsigned int h,
+ unsigned int w) {
+ return getValue(getIndex(b, c, h, w));
+}
+
+void CharTensor::setValue(float value) {
+ int8_t *data = (int8_t *)getData();
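+  // the float argument is implicitly narrowed to int8_t by std::fill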
+ std::fill(data, data + size(), value);
+}
+
+void CharTensor::addValue(unsigned int b, unsigned int c, unsigned int h,
+ unsigned int w, float value, float beta) {
+ auto const &idx = getIndex(b, c, h, w);
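+  // applies data[idx] = trunc(beta * data[idx] + value); the result is
+  // narrowed to int8 without saturation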
+ float output = ((int8_t *)getData())[idx];
+ output *= beta;
+ output += value;
+
+ ((int8_t *)getData())[idx] = std::trunc(output);
+}
+
+void CharTensor::setValue(unsigned int b, unsigned int c, unsigned int h,
+ unsigned int w, float value) {
+ ((int8_t *)getData())[getIndex(b, c, h, w)] = (int8_t)value;
+}
+
+void CharTensor::setZero() {
+ /// @todo replace with apply_i or scal
+ setValue(0);
+}
+
+void CharTensor::initialize() {
+ if (empty() || !isAllocated())
+ return;
+
+ /// @note Sampling from the normal/uniform distribution is invalid
+ switch (initializer) {
+ case Initializer::ZEROS:
+ setZero();
+ break;
+ case Initializer::ONES:
+ setValue(1.0f);
+ break;
+ case Initializer::NONE:
+ break;
+ default:
+ throw std::invalid_argument("Initializer not valid for " +
+ getStringDataType());
+ break;
+ }
+
+ putData();
+}
+
+void CharTensor::initialize(Initializer init) {
+ initializer = init;
+ initialize();
+}
+
+void CharTensor::copy(const Tensor &from) {
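+  /// @note the source tensor is assumed to already contain QINT8 data;
+  /// no data type conversion is performed here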
+ reshape(from.getDim());
+ copy(from.getData());
+}
+
+void CharTensor::copyData(const Tensor &from) {
+ NNTR_THROW_IF(!contiguous, std::invalid_argument)
+ << getName() << " is not contiguous, cannot copy.";
+
+ NNTR_THROW_IF(size() != from.size(), std::invalid_argument)
+ << "Size of tensor to copy must match";
+
+ /// @todo support copy from float32 & float16 to int8 data
+ /// @note this could require scale factor
+ switch (from.getDataType()) {
+ case ml::train::TensorDim::DataType::QINT8:
+    copy(from.getData());
+    break;
+  default:
+ throw std::invalid_argument("Error: Unsupported data type");
+ break;
+ }
+}
+
+void CharTensor::copy_with_stride(const Tensor &input, Tensor &output) {
+ for (unsigned int b = 0; b < output.batch(); ++b) {
+ for (unsigned int c = 0; c < output.channel(); ++c) {
+ for (unsigned int h = 0; h < output.height(); ++h) {
+ for (unsigned int w = 0; w < output.width(); ++w) {
+ output.setValue(b, c, h, w, input.getValue<int8_t>(b, c, h, w));
+ }
+ }
+ }
+ }
+}
+
+std::vector<unsigned int> CharTensor::argmax() const {
+ std::vector<unsigned int> result;
+ const int8_t *data = (int8_t *)getData();
+ size_t batch_size = batch();
+ size_t feature_len = dim.getFeatureLen();
+
+ result.resize(batch_size);
+
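+  // the returned index for each batch is relative to the start of that
+  // batch's feature slice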
+ for (unsigned int b = 0; b < batch_size; b++) {
+ auto max_iter =
+ std::max_element(data + b * feature_len, data + (b + 1) * feature_len);
+ result[b] = std::distance(data, max_iter) - (b * feature_len);
+ }
+ return result;
+}
+
+float CharTensor::max_abs() const {
+  const int8_t *data = (int8_t *)getData();
+
+  /// @note negating INT8_MIN (-128) overflows int8_t
+  int8_t max_val = (data[0] >= 0) ? data[0] : -data[0];
+  for (unsigned int i = 1; i < size(); ++i) {
+    int8_t cur_val = (data[i] >= 0) ? data[i] : -data[i];
+    if (cur_val > max_val) {
+      max_val = cur_val;
+    }
+  }
+
+  return max_val;
+}
+
+float CharTensor::maxValue() const {
+ const int8_t *data = (int8_t *)getData();
+ return *std::max_element(data, data + size());
+}
+
+float CharTensor::minValue() const {
+ const int8_t *data = (int8_t *)getData();
+ return *std::min_element(data, data + size());
+}
+
+void CharTensor::print(std::ostream &out) const {
+ const int8_t *data = (int8_t *)getData();
+ unsigned int len = size();
+ out << "data addr: " << reinterpret_cast<const float *>(data) << '\n';
+ out << dim;
+
+ if (len > 100) {
+ out << '[' << (int)data[0] << ' ' << (int)data[1] << ' ' << (int)data[2]
+ << " ... " << (int)data[len - 3] << ' ' << (int)data[len - 2] << ' '
+ << (int)data[len - 1] << ']' << std::endl;
+ return;
+ }
+
+ std::ios init(NULL);
+ init.copyfmt(out);
+ if (getFormat() == Tformat::NCHW) {
+ for (unsigned int k = 0; k < batch(); k++) {
+ for (unsigned int l = 0; l < channel(); l++) {
+ for (unsigned int i = 0; i < height(); i++) {
+ for (unsigned int j = 0; j < width(); j++) {
+ out << std::setw(10) << (int)this->getValue(k, l, i, j) << " ";
+ }
+ out << std::endl;
+ }
+ out << std::endl;
+ }
+ out << "-------" << std::endl;
+ }
+ } else {
+ for (unsigned int k = 0; k < batch(); k++) {
+ for (unsigned int i = 0; i < height(); i++) {
+ for (unsigned int j = 0; j < width(); j++) {
+ for (unsigned int l = 0; l < channel(); l++) {
+ out << std::setw(10) << (int)this->getValue(k, l, i, j) << " ";
+ }
+ out << std::endl;
+ }
+ out << std::endl;
+ }
+ out << "-------" << std::endl;
+ }
+  }
+  out.copyfmt(init);
+}
+
+void CharTensor::copy(const void *buf) {
+ NNTR_THROW_IF(!contiguous, std::invalid_argument)
+ << getName() << " is not contiguous, cannot copy.";
+
+ if (buf == getData()) {
+ return;
+ }
+
+ /// @todo need to optimize
+ for (unsigned int i = 0; i < size(); ++i) {
+ ((int8_t *)getData())[i] = ((int8_t *)buf)[i];
+ }
+}
+
+} // namespace nntrainer
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * @file char_tensor.h
+ * @date 02 April 2024
+ * @brief This is CharTensor class for 8-bit integer calculation
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Donghyeon Jeong <dhyeon.jeong@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#ifndef __CHAR_TENSOR_H__
+#define __CHAR_TENSOR_H__
+#ifdef __cplusplus
+
+#include <tensor_base.h>
+
+namespace nntrainer {
+
+/**
+ * @class CharTensor class
+ * @brief CharTensor class for 8-bit integer calculation
+ */
+class CharTensor : public TensorBase {
+public:
+ /**
+ * @brief Basic Constructor of Tensor
+ */
+ CharTensor(std::string name_ = "", Tformat fm = Tformat::NCHW);
+
+ /**
+ * @brief Construct a new CharTensor object
+ *
+   * @param d Tensor dim for this tensor
+ * @param alloc_now Allocate memory to this tensor or not
+ * @param init Initializer for the tensor
+ * @param name Name of the tensor
+ */
+ CharTensor(const TensorDim &d, bool alloc_now,
+ Initializer init = Initializer::NONE, std::string name = "");
+
+ /**
+ * @brief Construct a new CharTensor object
+ *
+ * @param d Tensor dim for this tensor
+ * @param buf buffer
+ */
+ CharTensor(const TensorDim &d, const void *buf = nullptr);
+
+ /**
+ * @brief Construct a new CharTensor object
+ *
+ * @param d data for the Tensor
+ * @param fm format for the Tensor
+ */
+ CharTensor(
+ std::vector<std::vector<std::vector<std::vector<int8_t>>>> const &d,
+ Tformat fm);
+
+ /**
+ * @brief Construct a new CharTensor object
+ * @param rhs TensorBase object to copy
+ */
+ CharTensor(TensorBase &rhs) : TensorBase(rhs) {}
+
+ /**
+ * @brief Basic Destructor
+ */
+ ~CharTensor() {}
+
+ /**
+ * @brief Comparison operator overload
+ * @param[in] rhs Tensor to be compared with
+ * @note Only compares Tensor data
+ */
+ bool operator==(const CharTensor &rhs) const;
+
+ /**
+ * @brief Comparison operator overload
+ * @param[in] rhs Tensor to be compared with
+ * @note Only compares Tensor data
+ */
+ bool operator!=(const CharTensor &rhs) const { return !(*this == rhs); }
+
+ /**
+ * @copydoc Tensor::allocate()
+ */
+ void allocate() override;
+
+ /**
+ * @copydoc Tensor::deallocate()
+ */
+ void deallocate() override;
+
+ /**
+ * @copydoc Tensor::getData()
+ */
+ void *getData() const override;
+
+ /**
+ * @copydoc Tensor::getData(size_t idx)
+ */
+ void *getData(size_t idx) const override;
+
+ /**
+ * @brief i data index
+ * @retval address of ith data
+ */
+ void *getAddress(unsigned int i) override;
+
+ /**
+ * @brief i data index
+ * @retval address of ith data
+ */
+ const void *getAddress(unsigned int i) const override;
+
+ /**
+ * @brief return value at specific location
+ * @param[in] i index
+ */
+ const int8_t &getValue(unsigned int i) const;
+
+ /**
+ * @brief return value at specific location
+ * @param[in] i index
+ */
+ int8_t &getValue(unsigned int i);
+
+ /**
+ * @brief return value at specific location
+ * @param[in] b batch location
+ * @param[in] c channel location
+ * @param[in] h height location
+ * @param[in] w width location
+ */
+ const int8_t &getValue(unsigned int b, unsigned int c, unsigned int h,
+ unsigned int w) const;
+
+ /**
+ * @brief return value at specific location
+ * @param[in] b batch location
+ * @param[in] c channel location
+ * @param[in] h height location
+ * @param[in] w width location
+ */
+ int8_t &getValue(unsigned int b, unsigned int c, unsigned int h,
+ unsigned int w);
+
+ /**
+ * @copydoc Tensor::setValue(float value)
+ */
+ void setValue(float value) override;
+
+ /**
+ * @copydoc Tensor::setValue(b, c, h, w, value)
+ */
+ void setValue(unsigned int b, unsigned int c, unsigned int h, unsigned int w,
+ float value) override;
+
+ /**
+ * @copydoc Tensor::addValue(b, c, h, w, value, beta)
+ */
+ void addValue(unsigned int b, unsigned int c, unsigned int h, unsigned int w,
+ float value, float beta) override;
+
+ /**
+ * @copydoc Tensor::setZero()
+ */
+ void setZero() override;
+
+ /**
+ * @copydoc Tensor::initialize()
+ */
+ void initialize() override;
+
+ /**
+ * @copydoc Tensor::initialize(Initializer init)
+ */
+ void initialize(Initializer init) override;
+
+ /**
+ * @copydoc Tensor::copy(const Tensor &from)
+ */
+ void copy(const Tensor &from) override;
+
+ /**
+ * @copydoc Tensor::copyData(const Tensor &from)
+ */
+ void copyData(const Tensor &from) override;
+
+ /**
+ * @copydoc Tensor::copy_with_stride()
+ */
+ void copy_with_stride(const Tensor &input, Tensor &output) override;
+
+ /**
+ * @copydoc Tensor::argmax()
+ */
+ std::vector<unsigned int> argmax() const override;
+
+ /**
+ * @copydoc Tensor::max_abs()
+ */
+ float max_abs() const override;
+
+ /**
+ * @copydoc Tensor::maxValue()
+ */
+ float maxValue() const override;
+
+ /**
+ * @copydoc Tensor::minValue()
+ */
+ float minValue() const override;
+
+ /**
+ * @copydoc Tensor::print(std::ostream &out)
+ */
+ void print(std::ostream &out) const override;
+
+private:
+ /**
+ * @brief copy a buffer to @a this, the caller has to ensure that @a this is
+ * initialized otherwise undefined behavior
+ *
+ * @param buf buffer to copy from
+ */
+ void copy(const void *buf);
+
+ /**
+ * @brief Get the Data Type String object
+ * @return std::string of tensor data type (QINT8)
+ */
+ std::string getStringDataType() const override { return "QINT8"; }
+};
+
+} // namespace nntrainer
+
+#endif /* __cplusplus */
+#endif /* __CHAR_TENSOR_H__ */
throw std::invalid_argument("Error: enable-fp16 is not enabled");
#endif
break;
+ case ml::train::TensorDim::DataType::QINT8:
+ scopy_int8_to_float32(from.size(), from.getData<uint8_t>(), 1,
+ (float *)getData(), 1);
+ break;
default:
throw std::invalid_argument("Error: Unsupported data type");
break;
case ml::train::TensorDim::DataType::FP16:
copy(from.getData<_FP16>());
break;
+ case ml::train::TensorDim::DataType::QINT8:
+ scopy_int8_to_float16(from.size(), from.getData<uint8_t>(), 1,
+ (_FP16 *)getData(), 1);
+ break;
default:
throw std::invalid_argument("Error: Unsupported data type");
break;
'tensor.cpp',
'tensor_base.cpp',
'float_tensor.cpp',
+ 'char_tensor.cpp',
'tensor_dim.cpp',
'var_grad.cpp',
'weight.cpp',
'tensor.h',
'tensor_base.h',
'float_tensor.h',
+ 'char_tensor.h',
'weight.h',
'var_grad.h',
'tensor_wrap_specs.h',
* @bug No known bugs except for NYI items
*/
+#include <char_tensor.h>
#include <float_tensor.h>
#include <lazy_tensor.h>
#include <tensor.h>
#else
throw std::invalid_argument("Error: enable-fp16 is not enabled");
#endif
+ } else if (d_type == Tdatatype::QINT8) {
+ itensor = std::shared_ptr<CharTensor>(new CharTensor(name_, fm),
+ std::default_delete<CharTensor>());
} else {
throw std::invalid_argument(
"Error: Tensor cannot be constructed because the given d_type is not "
#else
throw std::invalid_argument("Error: enable-fp16 is not enabled");
#endif
+ } else if (d.getDataType() == Tdatatype::QINT8) {
+ itensor =
+ std::shared_ptr<CharTensor>(new CharTensor(d, alloc_now, init, name),
+ std::default_delete<CharTensor>());
} else {
throw std::invalid_argument(
"Error: Tensor cannot be constructed because the given d_type is not "
#else
throw std::invalid_argument("Error: enable-fp16 is not enabled");
#endif
+ } else if (d.getDataType() == Tdatatype::QINT8) {
+ itensor = std::shared_ptr<CharTensor>(new CharTensor(d, buf),
+ std::default_delete<CharTensor>());
} else {
throw std::invalid_argument(
"Error: Tensor cannot be constructed because the given d_type is not "
#else
throw std::invalid_argument("Error: enable-fp16 is not enabled");
#endif
+ } else if (rhs.getDataType() == Tdatatype::QINT8) {
+ itensor = std::shared_ptr<CharTensor>(new CharTensor(*rhs.itensor),
+ std::default_delete<CharTensor>());
}
}
#else
throw std::invalid_argument("Error: enable-fp16 is not enabled");
#endif
+ } else if (rhs.getDataType() == Tdatatype::QINT8) {
+ itensor = std::shared_ptr<CharTensor>(new CharTensor(*rhs.itensor),
+ std::default_delete<CharTensor>());
}
return *this;
}
"Error: HalfTensor cannot be created or used when FP16 is not enabled. "
"Please check if the tensor data type is set properly.");
#endif
+ } else if (getDataType() == Tdatatype::QINT8) {
+ return *std::dynamic_pointer_cast<CharTensor>(itensor) ==
+ *std::dynamic_pointer_cast<CharTensor>(rhs.itensor);
}
}
return false;
#include <cstddef>
#include <blas_interface.h>
+#include <char_tensor.h>
#include <float_tensor.h>
#include <nntrainer_log.h>
#include <tensor_base.h>
Tensor(std::vector<std::decay<decltype(d)>::type>{d}, t_type){};
#endif
+ /**
+ * @brief Constructor of Tensor
+ * @param[in] d data for the Tensor. It needs to set format properly.
+ * @param[in] t_type Tensor Type
+ */
+ Tensor(std::vector<std::vector<std::vector<std::vector<int8_t>>>> const &d,
+ ml::train::TensorDim::TensorType t_type) {
+ itensor = std::shared_ptr<CharTensor>(new CharTensor(d, t_type.format),
+ std::default_delete<CharTensor>());
+ }
+
+ /**
+ * @brief Constructor of Tensor
+ * @note This constructor copies vector again. needs refactoring
+ * @param[in] d data for the Tensor. It needs to set format properly.
+ * @param[in] t_type Tensor Type
+ */
+ Tensor(std::vector<std::vector<std::vector<int8_t>>> const &d,
+ ml::train::TensorDim::TensorType t_type) :
+ Tensor(std::vector<std::decay<decltype(d)>::type>{d}, t_type){};
+
+ /**
+ * @brief Constructor of Tensor
+ * @note This constructor copies vector again. needs refactoring
+ * @param[in] d data for the Tensor with batch size one
+ * @param[in] t_type Tensor Type
+ */
+ Tensor(std::vector<std::vector<int8_t>> const &d,
+ ml::train::TensorDim::TensorType t_type) :
+ Tensor(std::vector<std::decay<decltype(d)>::type>{d}, t_type){};
+
/**
* @brief Basic Destructor
*/
%{_includedir}/nntrainer/memory_data.h
%{_includedir}/nntrainer/tensor.h
%{_includedir}/nntrainer/tensor_base.h
+%{_includedir}/nntrainer/char_tensor.h
%{_includedir}/nntrainer/float_tensor.h
%if 0%{?enable_fp16}
%{_includedir}/nntrainer/half_tensor.h
EXPECT_EQ(status, ML_ERROR_NONE);
}
-// TEST(nntrainer_Tensor, Tensor_04_p) {
-// int status = ML_ERROR_NONE;
-// int batch = 3;
-// int height = 3;
-// int width = 10;
-// std::vector<std::vector<std::vector<uint8_t>>> in;
-
-// for (int k = 0; k < batch; ++k) {
-// std::vector<std::vector<uint8_t>> ttv;
-// for (int i = 0; i < height; ++i) {
-// std::vector<uint8_t> tv;
-// for (int j = 0; j < width; ++j) {
-// tv.push_back(k * height * width + i * width + j);
-// }
-// ttv.push_back(tv);
-// }
-// in.push_back(ttv);
-// }
+TEST(nntrainer_Tensor, Tensor_04_p) {
+ int status = ML_ERROR_NONE;
+ int batch = 3;
+ int height = 3;
+ int width = 10;
+ std::vector<std::vector<std::vector<int8_t>>> in;
-// nntrainer::Tensor tensor = nntrainer::Tensor(
-// in, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
-// ASSERT_NE(nullptr, tensor.getData<uint8_t>());
+ for (int k = 0; k < batch; ++k) {
+ std::vector<std::vector<int8_t>> ttv;
+ for (int i = 0; i < height; ++i) {
+ std::vector<int8_t> tv;
+ for (int j = 0; j < width; ++j) {
+ tv.push_back(k * height * width + i * width + j);
+ }
+ ttv.push_back(tv);
+ }
+ in.push_back(ttv);
+ }
-// if (tensor.getValue<uint8_t>(0, 0, 0, 1) != 1)
-// status = ML_ERROR_INVALID_PARAMETER;
-// EXPECT_EQ(status, ML_ERROR_NONE);
-// }
+ nntrainer::Tensor tensor = nntrainer::Tensor(
+ in, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
+ ASSERT_NE(nullptr, tensor.getData<int8_t>());
+
+ if (tensor.getValue<int8_t>(0, 0, 0, 1) != 1)
+ status = ML_ERROR_INVALID_PARAMETER;
+ EXPECT_EQ(status, ML_ERROR_NONE);
+}
// TEST(nntrainer_Tensor, Tensor_05_p) {
// int status = ML_ERROR_NONE;
EXPECT_TRUE(t.isAllocated());
}
-// TEST(nntrainer_Tensor, allocate_04_p) {
-// nntrainer::Tensor t(
-// {1, 2, 3, 4, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8}},
-// true);
-// EXPECT_TRUE(t.isAllocated());
+TEST(nntrainer_Tensor, allocate_04_p) {
+ nntrainer::Tensor t(
+ {1, 2, 3, 4, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8}},
+ true);
+ EXPECT_TRUE(t.isAllocated());
-// t.allocate();
-// EXPECT_TRUE(t.isAllocated());
-// }
+ t.allocate();
+ EXPECT_TRUE(t.isAllocated());
+}
// TEST(nntrainer_Tensor, allocate_05_p) {
// nntrainer::Tensor t(
EXPECT_EQ(golden, t);
}
-// TEST(nntrainer_Tensor, initialize_09_p) {
-// nntrainer::Tensor t(
-// {1, 2, 3, 4, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT4}},
-// true, nntrainer::Initializer::ONES);
-// nntrainer::Tensor golden(
-// {1, 2, 3, 4, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT4}},
-// true, nntrainer::Initializer::ZEROS);
-// EXPECT_NE(golden, t);
-// golden.initialize(nntrainer::Initializer::ONES);
-// EXPECT_EQ(golden, t);
-// }
+TEST(nntrainer_Tensor, initialize_09_p) {
+ nntrainer::Tensor t(
+ {1, 2, 3, 4, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8}}, true,
+ nntrainer::Initializer::ONES);
+ nntrainer::Tensor golden(
+ {1, 2, 3, 4, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8}}, true,
+ nntrainer::Initializer::ZEROS);
+ EXPECT_NE(golden, t);
+ golden.initialize(nntrainer::Initializer::ONES);
+ EXPECT_EQ(golden, t);
+}
TEST(nntrainer_Tensor, split_01_p) {
{
EXPECT_NO_THROW(pool.deallocate());
}
-// /**
-// * @brief qint8 tensors reuse fp32 tensor memory space
-// */
-// TEST(TensorPool, validate_memory_reuse_01_p) {
-// // |--------- t1 ---------|
-// // |-t2-||-t3-||-t4-||-t5-|
-// nntrainer::TensorPool pool;
-// nntrainer::Tensor *t1 = nullptr, *t2 = nullptr, *t3 = nullptr, *t4 =
-// nullptr,
-// *t5 = nullptr;
+/**
+ * @brief qint8 tensors reuse fp32 tensor memory space
+ */
+TEST(TensorPool, validate_memory_reuse_01_p) {
+ // |--------- t1 ---------|
+ // |-t2-||-t3-||-t4-||-t5-|
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t1 = nullptr, *t2 = nullptr, *t3 = nullptr, *t4 = nullptr,
+ *t5 = nullptr;
-// EXPECT_NO_THROW(
-// t1 = pool.request("t1", nntrainer::TensorDim({4}), {0},
-// nntrainer::TensorLifespan::FORWARD_FUNC_LIFESPAN));
-// EXPECT_NE(t1, nullptr);
-// EXPECT_FALSE(t1->isAllocated());
+ EXPECT_NO_THROW(
+ t1 = pool.request("t1", nntrainer::TensorDim({4}), {0},
+ nntrainer::TensorLifespan::FORWARD_FUNC_LIFESPAN));
+ EXPECT_NE(t1, nullptr);
+ EXPECT_FALSE(t1->isAllocated());
-// EXPECT_NO_THROW(
-// t2 = pool.request("t2",
-// nntrainer::TensorDim({4}, {nntrainer::Tformat::NCHW,
-// nntrainer::Tdatatype::QINT8}),
-// {1},
-// nntrainer::TensorLifespan::BACKWARD_FUNC_LIFESPAN));
-// EXPECT_NE(t2, nullptr);
-// EXPECT_FALSE(t2->isAllocated());
+ EXPECT_NO_THROW(
+ t2 = pool.request("t2",
+ nntrainer::TensorDim({4}, {nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8}),
+ {1}, nntrainer::TensorLifespan::BACKWARD_FUNC_LIFESPAN));
+ EXPECT_NE(t2, nullptr);
+ EXPECT_FALSE(t2->isAllocated());
-// EXPECT_NO_THROW(
-// t3 = pool.request("t3",
-// nntrainer::TensorDim({4}, {nntrainer::Tformat::NCHW,
-// nntrainer::Tdatatype::QINT8}),
-// {1},
-// nntrainer::TensorLifespan::BACKWARD_FUNC_LIFESPAN));
-// EXPECT_NE(t3, nullptr);
-// EXPECT_FALSE(t3->isAllocated());
+ EXPECT_NO_THROW(
+ t3 = pool.request("t3",
+ nntrainer::TensorDim({4}, {nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8}),
+ {1}, nntrainer::TensorLifespan::BACKWARD_FUNC_LIFESPAN));
+ EXPECT_NE(t3, nullptr);
+ EXPECT_FALSE(t3->isAllocated());
-// EXPECT_NO_THROW(
-// t4 = pool.request("t4",
-// nntrainer::TensorDim({4}, {nntrainer::Tformat::NCHW,
-// nntrainer::Tdatatype::QINT8}),
-// {1},
-// nntrainer::TensorLifespan::BACKWARD_FUNC_LIFESPAN));
-// EXPECT_NE(t4, nullptr);
-// EXPECT_FALSE(t4->isAllocated());
+ EXPECT_NO_THROW(
+ t4 = pool.request("t4",
+ nntrainer::TensorDim({4}, {nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8}),
+ {1}, nntrainer::TensorLifespan::BACKWARD_FUNC_LIFESPAN));
+ EXPECT_NE(t4, nullptr);
+ EXPECT_FALSE(t4->isAllocated());
-// EXPECT_NO_THROW(
-// t5 = pool.request("t5",
-// nntrainer::TensorDim({4}, {nntrainer::Tformat::NCHW,
-// nntrainer::Tdatatype::QINT8}),
-// {1},
-// nntrainer::TensorLifespan::BACKWARD_FUNC_LIFESPAN));
-// EXPECT_NE(t5, nullptr);
-// EXPECT_FALSE(t5->isAllocated());
+ EXPECT_NO_THROW(
+ t5 = pool.request("t5",
+ nntrainer::TensorDim({4}, {nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8}),
+ {1}, nntrainer::TensorLifespan::BACKWARD_FUNC_LIFESPAN));
+ EXPECT_NE(t5, nullptr);
+ EXPECT_FALSE(t5->isAllocated());
-// EXPECT_NO_THROW(pool.finalize(nntrainer::OptimizedV1Planner(), 0, 2));
-// EXPECT_EQ(pool.minMemoryRequirement(), t1->bytes());
+ EXPECT_NO_THROW(pool.finalize(nntrainer::OptimizedV1Planner(), 0, 2));
+ EXPECT_EQ(pool.minMemoryRequirement(), t1->bytes());
-// EXPECT_NO_THROW(pool.allocate());
+ EXPECT_NO_THROW(pool.allocate());
-// EXPECT_EQ(t1->getAddress<float>(0), (float *)t2->getAddress<uint8_t>(0));
-// EXPECT_EQ(t1->getAddress<float>(1), (float *)t3->getAddress<uint8_t>(0));
-// EXPECT_EQ(t1->getAddress<float>(2), (float *)t4->getAddress<uint8_t>(0));
-// EXPECT_EQ(t1->getAddress<float>(3), (float *)t5->getAddress<uint8_t>(0));
+ EXPECT_EQ(t1->getAddress<float>(0), (float *)t2->getAddress<int8_t>(0));
+ EXPECT_EQ(t1->getAddress<float>(1), (float *)t3->getAddress<int8_t>(0));
+ EXPECT_EQ(t1->getAddress<float>(2), (float *)t4->getAddress<int8_t>(0));
+ EXPECT_EQ(t1->getAddress<float>(3), (float *)t5->getAddress<int8_t>(0));
-// EXPECT_NO_THROW(pool.deallocate());
-// }
+ EXPECT_NO_THROW(pool.deallocate());
+}
// /**
// * @brief qint4 tensors reuse fp32 tensor memory space