Index() = default;
public:
- Index(int32_t ch, int32_t row, int32_t col) : _ch{ch}, _row{row}, _col{col}
+ Index(int32_t ch, int32_t row, int32_t col) : _batch{1}, _ch{ch}, _row{row}, _col{col}
+ {
+ // DO NOTHING
+ }
+ Index(int32_t batch, int32_t ch, int32_t row, int32_t col) : _batch{batch}, _ch{ch}, _row{row}, _col{col}
{
// DO NOTHING
}
public:
+ int32_t batch(void) const { return _batch; }
int32_t ch(void) const { return _ch; }
int32_t row(void) const { return _row; }
int32_t col(void) const { return _col; }
public:
+ int32_t &batch(void) { return _batch; }
int32_t &ch(void) { return _ch; }
int32_t &row(void) { return _row; }
int32_t &col(void) { return _col; }
private:
+ int32_t _batch;
int32_t _ch;
int32_t _row;
int32_t _col;
public:
template <typename Callable> IndexIterator &iter(Callable cb)
{
- for (uint32_t ch = 0; ch < _shape.C; ++ch)
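+    // Visit elements in batch -> channel -> row -> column order; the callback receives (batch, ch, row, col)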
+ for (uint32_t batch = 0; batch < _shape.N; ++batch)
{
- for (uint32_t row = 0; row < _shape.H; ++row)
+ for (uint32_t ch = 0; ch < _shape.C; ++ch)
{
- for (uint32_t col = 0; col < _shape.W; ++col)
+ for (uint32_t row = 0; row < _shape.H; ++row)
{
- cb(ch, row, col);
+ for (uint32_t col = 0; col < _shape.W; ++col)
+ {
+ cb(batch, ch, row, col);
+ }
}
}
}
virtual ~Reader() = default;
virtual T at(uint32_t ch, uint32_t row, uint32_t col) const = 0;
+ virtual T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const = 0;
};
} // namespace feature
struct Shape
{
+ int32_t N; // Batch
int32_t C; // Depth
int32_t H; // Height
int32_t W; // Width
Shape() = default;
- Shape(int32_t depth, int32_t height, int32_t width) : C{depth}, H{height}, W{width}
+ Shape(int32_t depth, int32_t height, int32_t width) : N{1}, C{depth}, H{height}, W{width}
+ {
+ // DO NOTHING
+ }
+ Shape(int32_t batch, int32_t depth, int32_t height, int32_t width) : N{batch}, C{depth}, H{height}, W{width}
{
// DO NOTHING
}
return *ptr;
}
+ float at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
+
+ float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
public:
float &at(uint32_t ch, uint32_t row, uint32_t col)
return *ptr;
}
+ float &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
+ {
+ const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
+
+ float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
private:
size_t feature_index_to_byte_offset(uint32_t ch, uint32_t row, uint32_t col) const
// ARM Compute uses CHW ordering
return _tensor->info()->offset_element_in_bytes(::arm_compute::Coordinates{col, row, ch});
}
+ size_t feature_index_to_byte_offset(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const
+ {
+ // ARM Compute uses NCHW ordering; Coordinates lists dimensions fastest-varying first (W, H, C, N)
+ return _tensor->info()->offset_element_in_bytes(
+ ::arm_compute::Coordinates{col, row, ch, batch});
+ }
private:
::nnfw::util::feature::Shape _shape;
const ::internal::nnapi::feature::Reader<float> from{_shape, tensor.buffer(), _size};
::internal::nnapi::feature::View<float> into{_shape, _base, _size};
- ::nnfw::util::feature::iterate(_shape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(ch, row, col);
- into.at(ch, row, col) = value;
- };
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
}
private:
const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
::internal::nnapi::feature::View<float> into{_shape, tensor.buffer(), _size};
- ::nnfw::util::feature::iterate(_shape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(ch, row, col);
- into.at(ch, row, col) = value;
- };
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
}
private:
return arr[index];
}
+ float at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ const auto arr = reinterpret_cast<const float *>(_ptr);
+
+ return arr[index];
+ }
private:
nnfw::util::feature::Shape _shape;
return res;
}
+inline uint32_t index_of(const ::nnfw::util::feature::Shape &shape, uint32_t batch, uint32_t ch,
+ uint32_t row, uint32_t col)
+{
+ uint32_t res = 0;
+
+ // NNAPI uses NHWC ordering
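+ // Example: with shape {N,H,W,C} = {2,3,4,5}, (batch, ch, row, col) = (1, 2, 1, 3) maps to 1*60 + 1*20 + 3*5 + 2 = 97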
+ res += batch * shape.H * shape.W * shape.C;
+ res += row * shape.W * shape.C;
+ res += col * shape.C;
+ res += ch;
+
+ return res;
+}
+
} // namespace feature
} // namespace nnapi
} // namespace internal
return arr[index];
}
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ T *arr = reinterpret_cast<T *>(_ptr);
+
+ return arr[index];
+ }
+
T &at(uint32_t ch, uint32_t row, uint32_t col)
{
uint32_t index = index_of(_shape, ch, row, col);
return arr[index];
}
+ T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ T *arr = reinterpret_cast<T *>(_ptr);
+
+ return arr[index];
+ }
+
private:
nnfw::util::feature::Shape _shape;
const ::internal::nnapi::feature::Reader<float> from{featureShape, inputBuffer, inputSize};
::internal::arm_compute::feature::View<float> into{_outputTensor};
- ::nnfw::util::feature::iterate(featureShape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(ch, row, col);
- into.at(ch, row, col) = value;
- };
+ ::nnfw::util::feature::iterate(featureShape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
}
_outputTensor->unmap(queue);
const ::internal::arm_compute::feature::View<float> from{_inputTensor};
::internal::nnapi::feature::View<float> into{featureShape, outputBuffer, outputSize};
- ::nnfw::util::feature::iterate(featureShape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(ch, row, col);
- into.at(ch, row, col) = value;
- };
+ ::nnfw::util::feature::iterate(featureShape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
}
_inputTensor->unmap(queue);
const ::internal::nnapi::feature::Reader<float> from{featureShape, inputBuffer, inputSize};
::internal::nnapi::feature::View<float> into{featureShape, outputBuffer, outputSize};
- ::nnfw::util::feature::iterate(featureShape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(ch, row, col);
- into.at(ch, row, col) = value;
- };
+ ::nnfw::util::feature::iterate(featureShape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
}
}
const ::internal::nnapi::feature::Reader<float> from{featureShape, inputBuffer, inputSize};
::internal::nnapi::feature::View<float> into{featureShape, outputBuffer, outputSize};
- ::nnfw::util::feature::iterate(featureShape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(ch, row, col);
- into.at(ch, row, col) = value;
- };
+ ::nnfw::util::feature::iterate(featureShape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
}
}
const ::internal::nnapi::feature::Reader<T> from{feature_shape, feature_base, feature_size};
::internal::arm_compute::feature::View<T> into{&tensor};
- ::nnfw::util::feature::iterate(feature_shape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(ch, row, col);
- into.at(ch, row, col) = value;
- };
+ ::nnfw::util::feature::iterate(feature_shape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
}
template <typename T>
// Inevitably casting must be done.
::internal::nnapi::feature::View<T> into{_shape, reinterpret_cast<uint8_t *>(_base), _size};
- ::nnfw::util::feature::iterate(_shape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(ch, row, col);
- into.at(ch, row, col) = value;
- };
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
}
private:
_shape, reinterpret_cast<const uint8_t *>(_base), _size};
::internal::arm_compute::feature::View<T> into{&tensor};
- ::nnfw::util::feature::iterate(_shape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(ch, row, col);
- into.at(ch, row, col) = value;
- };
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(batch, ch, row, col);
+ into.at(batch, ch, row, col) = value;
+ };
}
private:
// - Dimension(1) -> Height
// - Dimension(2) -> Width
// - Dimension(3) -> Depth
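+  // - Dimension(0) -> Batch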
- assert(dim(0) == 1);
+ const auto batch = dim(0);
const auto depth = dim(3);
const auto height = dim(1);
const auto width = dim(2);
- return nnfw::util::feature::Shape(depth, height, width);
+ return nnfw::util::feature::Shape(batch, depth, height, width);
}
nnfw::util::tensor::Shape Shape::asTensor(void) const
return *ptr;
}
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
+
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+
public:
T &at(uint32_t ch, uint32_t row, uint32_t col)
{
return *ptr;
}
+ T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
+ {
+ const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
+
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
+
+ return *ptr;
+ }
+
private:
size_t feature_index_to_byte_offset(uint32_t ch, uint32_t row, uint32_t col) const
{
return _tensor->info()->offset_element_in_bytes(::arm_compute::Coordinates{col, row, ch});
}
+ size_t feature_index_to_byte_offset(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const
+ {
+ // ARM Compute uses NCHW ordering; Coordinates lists dimensions fastest-varying first (W, H, C, N)
+ return _tensor->info()->offset_element_in_bytes(
+ ::arm_compute::Coordinates{col, row, ch, batch});
+ }
+
private:
::arm_compute::ITensor *_tensor;
};
return arr[index];
}
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ const auto arr = reinterpret_cast<const T *>(_ptr);
+
+ return arr[index];
+ }
+
private:
nnfw::util::feature::Shape _shape;
return res;
}
+inline uint32_t index_of(const ::nnfw::util::feature::Shape &shape, uint32_t batch, uint32_t ch,
+ uint32_t row, uint32_t col)
+{
+ uint32_t res = 0;
+
+ // NNAPI uses NHWC ordering
+ res += batch * shape.H * shape.W * shape.C;
+ res += row * shape.W * shape.C;
+ res += col * shape.C;
+ res += ch;
+
+ return res;
+}
+
} // namespace feature
} // namespace nnapi
} // namespace internal
return arr[index];
}
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ T *arr = reinterpret_cast<T *>(_ptr);
+
+ return arr[index];
+ }
T &at(uint32_t ch, uint32_t row, uint32_t col)
{
return arr[index];
}
+ T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
+ {
+ uint32_t index = index_of(_shape, batch, ch, row, col);
+
+ T *arr = reinterpret_cast<T *>(_ptr);
+
+ return arr[index];
+ }
private:
nnfw::util::feature::Shape _shape;