// SPDX-License-Identifier: Apache-2.0
/**
 * Copyright (C) 2020 Jijoong Moon <jijoong.moon@samsung.com>
 *
 * @brief  This is Tensor Dimension Class
 * @see    https://github.com/nnstreamer/nntrainer
 * @author Jijoong Moon <jijoong.moon@samsung.com>
 * @bug    No known bugs except for NYI items
 */
#include <algorithm>
#include <array>
#include <bitset>
#include <iterator>
#include <regex>
#include <stdexcept>
#include <string>
#include <vector>

#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <tensor_dim.h>
#include <util_func.h>
29 TensorDim::TensorDim(TensorDim::Format fm, TensorDim::DataType d_type,
30 const std::bitset<MAXDIM> &eff_dim_flag_,
31 const std::bitset<MAXDIM> &dyn_dim_flag_) :
32 TensorDim(TensorDim::TensorType(fm, d_type), eff_dim_flag_, dyn_dim_flag_) {}
34 TensorDim::TensorDim(TensorType t_type_,
35 const std::bitset<MAXDIM> &eff_dim_flag_,
36 const std::bitset<MAXDIM> &dyn_dim_flag_) :
38 eff_dim_flag(eff_dim_flag_),
39 dyn_dim_flag(dyn_dim_flag_) {
40 for (size_t i = 0; i < MAXDIM; ++i) {
47 TensorDim::TensorDim(std::initializer_list<size_t> dims, TensorType t_type_) :
49 int shift_size = MAXDIM - dims.size();
52 throw std::invalid_argument("[TensorDim] max dimension is 4");
57 for (auto &i : dims) {
58 setTensorDim(shift_size + cnt, i);
63 TensorDim::TensorDim(const std::array<size_t, 3> &shapes, TensorType t_type_) :
64 TensorDim({shapes[0], shapes[1], shapes[2]}, t_type_) {}
66 TensorDim::TensorDim(size_t d0, size_t d1, size_t d2, size_t d3,
68 const std::bitset<MAXDIM> &eff_dim_flag_,
69 const std::bitset<MAXDIM> &dyn_dim_flag_) :
70 TensorDim(t_type_, eff_dim_flag_, dyn_dim_flag_) {
76 feature_len = d1 * d2 * d3;
77 len = d0 * feature_len;
80 TensorDim::TensorDim(size_t d0, size_t d1, size_t d2, size_t d3,
81 TensorDim::Format fm, TensorDim::DataType d_type,
82 const std::bitset<MAXDIM> &eff_dim_flag_,
83 const std::bitset<MAXDIM> &dyn_dim_flag_) :
84 TensorDim(d0, d1, d2, d3, TensorType(fm, d_type), eff_dim_flag_,
87 TensorDim::TensorDim(const std::string &shape, TensorType t_type_) :
89 if (setTensorDim(shape, t_type_) != ML_ERROR_NONE) {
90 throw std::invalid_argument("[TensorDim] Setting TensorDim failed");
94 TensorDim::TensorDim(const std::string &shape, TensorDim::Format fm,
95 TensorDim::DataType d_type) :
97 if (setTensorDim(shape, TensorType(fm, d_type)) != ML_ERROR_NONE) {
98 throw std::invalid_argument("[TensorDim] Setting TensorDim failed");
102 TensorDim &TensorDim::operator=(const TensorDim &rhs) {
110 TensorDim &TensorDim::operator=(TensorDim &&rhs) noexcept {
117 uint TensorDim::getDataTypeSize() const {
118 switch (t_type.data_type) {
119 case TensorDim::DataType::FP16:
121 return sizeof(_FP16);
125 case TensorDim::DataType::FP32:
126 return sizeof(float);
128 return sizeof(float);
132 void TensorDim::resetLen() {
133 feature_len = dim[1] * dim[2] * dim[3];
134 len = dim[0] * feature_len;
137 const size_t TensorDim::getTensorDim(unsigned int idx) const {
139 throw std::invalid_argument(
140 "[TensorDim] Tensor Dimension index should be between 0 and 4");
145 void TensorDim::setTensorDim(unsigned int idx, size_t value) {
147 throw std::out_of_range(
148 "[TensorDim] Tensor Dimension index should be between 0 and 4");
151 throw std::invalid_argument(
152 "[TensorDim] Trying to assign value <=0 to tensor dim");
155 for (size_t i = 0; i < MAXDIM; ++i) {
164 int TensorDim::setTensorDim(const std::string &input_shape,
165 TensorType t_type_) {
166 int status = ML_ERROR_NONE;
167 static const std::regex words_regex("[^\\s.,:;!?]+");
169 std::sregex_iterator(input_shape.begin(), input_shape.end(), words_regex);
170 auto words_end = std::sregex_iterator();
171 int cur_dim = std::distance(words_begin, words_end);
172 if (cur_dim <= 0 || (size_t)cur_dim > MAXDIM) {
173 ml_loge("Tensor Dimension should be between 1 and 4");
174 return ML_ERROR_INVALID_PARAMETER;
177 for (std::sregex_iterator i = words_begin; i != words_end; ++i, ++cn) {
178 setTensorDim(MAXDIM - cur_dim + cn, std::stoul((*i).str()));
// int TensorDim::setTensorDim(const std::string &input_shape,
//                             TensorDim::Format fm, TensorDim::DataType d_type) {
//   return setTensorDim(input_shape, TensorType{fm, d_type});
// }
190 void TensorDim::setEffDimFlag(const std::bitset<MAXDIM> &dim_flag_) {
191 eff_dim_flag = dim_flag_;
194 void TensorDim::setDynDimFlag(const std::bitset<MAXDIM> &dim_flag_) {
195 dyn_dim_flag = dim_flag_;
198 const std::bitset<TensorDim::MAXDIM> &TensorDim::getEffDimFlag() const {
202 const std::bitset<TensorDim::MAXDIM> &TensorDim::getDynDimFlag() const {
206 void swap(TensorDim &lhs, TensorDim &rhs) noexcept {
207 std::swap_ranges(std::begin(lhs.dim), std::begin(lhs.dim) + TensorDim::MAXDIM,
208 std::begin(rhs.dim));
209 std::swap(lhs.len, rhs.len);
210 std::swap(lhs.feature_len, rhs.feature_len);
211 std::swap(lhs.eff_dim_flag, rhs.eff_dim_flag);
212 std::swap(lhs.dyn_dim_flag, rhs.dyn_dim_flag);
213 std::swap(lhs.t_type, rhs.t_type);
216 size_t TensorDim::batch() const { return dim[0]; };
218 size_t TensorDim::channel() const { return dim[1]; };
220 size_t TensorDim::height() const { return dim[2]; };
222 size_t TensorDim::width() const { return dim[3]; };
224 size_t TensorDim::getDataLen() const { return len; };
226 size_t TensorDim::getFeatureLen() const { return feature_len; };
228 void TensorDim::batch(size_t b) { setTensorDim(0, b); }
230 void TensorDim::channel(size_t c) { setTensorDim(1, c); }
232 void TensorDim::height(size_t h) { setTensorDim(2, h); }
234 void TensorDim::width(size_t w) { setTensorDim(3, w); }
236 const size_t *TensorDim::getDim() const { return dim; }
238 unsigned int TensorDim::getNumDim() { return MAXDIM; }
240 TensorDim TensorDim::transpose(const std::string &direction) const {
241 int dirs[MAXDIM - 1];
243 int status = nntrainer::getValues(3, direction, dirs);
244 NNTR_THROW_IF(status != ML_ERROR_NONE, std::invalid_argument)
245 << "parsing direction failed";
247 const std::array<size_t, MAXDIM> axes{
248 {0, (size_t)dirs[0] + 1, (size_t)dirs[1] + 1, (size_t)dirs[2] + 1}};
250 return transpose(axes);
253 TensorDim TensorDim::transpose(const std::array<size_t, MAXDIM> &axes) const {
254 TensorDim tmp(*this);
256 for (unsigned int i = 0; i < MAXDIM; ++i) {
257 tmp.setTensorDim(i, getTensorDim(axes[i]));
263 bool TensorDim::operator==(const TensorDim &rhs) const {
264 if (this->t_type.format != rhs.t_type.format)
267 if (this->t_type.data_type != rhs.t_type.data_type)
270 for (size_t i = 0; i < MAXDIM; ++i) {
271 if (this->dim[i] != rhs.dim[i]) {
279 bool TensorDim::operator!=(const TensorDim &rhs) const {
280 return !(*this == rhs);
283 bool TensorDim::isEmpty() const { return len == 0; }
285 unsigned int TensorDim::rank() const {
286 unsigned int rank = 0;
287 for (unsigned int i = 0; i < MAXDIM; i++) {
294 size_t &TensorDim::operator[](const unsigned int index) {
296 throw std::out_of_range(
297 "[TensorDim] Tensor Dimension index should be between 0 and 4");
301 const size_t &TensorDim::operator[](const unsigned int index) const {
303 throw std::out_of_range(
304 "[TensorDim] Tensor Dimension index should be between 0 and 4");
308 std::array<size_t, TensorDim::MAXDIM> TensorDim::computeStrides() const {
309 if (getFormat() == TensorDim::Format::NCHW) {
310 return {dim[1] * dim[2] * dim[3], dim[2] * dim[3], dim[3], 1};
312 return {height() * channel() * width(), width() * channel(), channel(), 1};
316 void TensorDim::reverse() { std::reverse(dim, dim + MAXDIM); }
318 std::vector<int> TensorDim::getEffectiveDimension(bool dynamic) const {
319 std::vector<int> eff_dim;
320 eff_dim.reserve(eff_dim_flag.count());
322 auto get_axis = [dynamic, this](unsigned int axis) -> int {
323 if (dynamic && dyn_dim_flag[MAXDIM - axis - 1]) {
330 for (unsigned int i = 0; i < MAXDIM; ++i) {
331 /// flip dim_flag to effectively match with our cognition
332 /// ex) 3:5:1:1 -> 3:5, we are setting eff_dim_flag to 0b1100
333 if (eff_dim_flag[MAXDIM - i - 1]) {
334 eff_dim.push_back(get_axis(i));
341 bool TensorDim::is_dynamic() const { return dyn_dim_flag.any(); }
343 std::ostream &operator<<(std::ostream &out, TensorDim const &d) {
346 (d.getDataType() == ml::train::TensorDim::DataType::FP16) ? "FP16" : "FP32";
347 std::string format_ =
348 (d.getFormat() == ml::train::TensorDim::Format::NCHW) ? "NCHW" : "NHWC";
349 out << "Shape: " << d.batch() << ":" << d.channel() << ":" << d.height()
350 << ":" << d.width() << " [ " << type_ << " : " << format_ << " ]"
355 } /* namespace train */