const ::internal::tflite::operand::Index block_size_index{node.param().block_size_index};
const ::internal::tflite::operand::Index padding_size_index{node.param().padding_size_index};
+ const auto &output_shape = _ctx.at(output_index).shape();
+ const auto &input_shape = _ctx.at(input_index).shape();
+ const auto &padding_size_shape = _ctx.at(padding_size_index).shape();
+ auto block_size_base = reinterpret_cast<const int32_t *>(_ctx.at(block_size_index).data().base());
+ auto padding_size_base =
+ reinterpret_cast<const int32_t *>(_ctx.at(padding_size_index).data().base());
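+ // NOTE Reading these constant buffers as int32_t is safe: both operands
+ //      are asserted to be ANEURALNETWORKS_TENSOR_INT32 below.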
+
{ // Validate operand shapes and constant data
+ const auto &block_size_shape = _ctx.at(block_size_index).shape();
// Currently, only 4D NHWC input/output op_contexts are supported.
// The 4D array needs to have exactly 2 spatial dimensions.
// TODO: Support arbitrary dimensions in SpaceToBatchND.
- assert(_ctx.at(input_index).shape().rank() == 4);
- assert(_ctx.at(output_index).shape().rank() == 4);
- assert(_ctx.at(block_size_index).shape().rank() == 1);
- assert(_ctx.at(padding_size_index).shape().rank() == 2);
-
- const auto &output_shape = _ctx.at(output_index).shape();
- const auto &input_shape = _ctx.at(input_index).shape();
- const auto &block_size_shape = _ctx.at(block_size_index).shape();
- const auto &padding_size_shape = _ctx.at(padding_size_index).shape();
+ assert(input_shape.rank() == 4);
+ assert(output_shape.rank() == 4);
+ assert(block_size_shape.rank() == 1);
+ assert(padding_size_shape.rank() == 2);
assert(output_shape.dim(3) == input_shape.dim(3));
assert(block_size_shape.dim(0) == 2);
assert(padding_size_shape.dim(0) == 2);
assert(padding_size_shape.dim(1) == 2);
+
+ assert(_ctx.at(block_size_index).hasData() && _ctx.at(padding_size_index).hasData());
+ assert(_ctx.at(block_size_index).type() == ANEURALNETWORKS_TENSOR_INT32);
+ assert(_ctx.at(padding_size_index).type() == ANEURALNETWORKS_TENSOR_INT32);
+
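+ // SpaceToBatchND moves spatial blocks into the batch dimension:
+ //   batch_out  = batch_in * block_h * block_w
+ //   height_out = (height_in + pad_top + pad_bottom) / block_h
+ //   width_out  = (width_in + pad_left + pad_right) / block_w
+ // e.g. a 1x4x4x1 input with 2x2 blocks and no padding becomes 4x2x2x1.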
+ assert(block_size_base[0] > 0 && block_size_base[1] > 0);
+ assert(output_shape.dim(0) == input_shape.dim(0) * block_size_base[0] * block_size_base[1]);
+ assert(output_shape.dim(1) ==
+ (input_shape.dim(1) + padding_size_base[0] + padding_size_base[1]) / block_size_base[0]);
+ assert(output_shape.dim(2) ==
+ (input_shape.dim(2) + padding_size_base[2] + padding_size_base[3]) / block_size_base[1]);
}
// Set Shape Constraints and TensorInfo
_ctx.at(padding_size_index).scale(),
_ctx.at(padding_size_index).zeroPoint()));
- if (_ctx.at(block_size_index).hasData())
- {
- const auto rank = _ctx.at(input_index).shape().rank();
- const auto num_of_block_size = _ctx.at(block_size_index).shape().asVector();
- auto block_size_base = _ctx.at(block_size_index).data().base();
- auto block_size_type = _ctx.at(block_size_index).type();
-
- switch (block_size_type)
- {
- case ANEURALNETWORKS_TENSOR_INT32:
- {
- auto initializer = [block_size_base, num_of_block_size,
- rank](::arm_compute::ITensor &tensor) {
- assert(num_of_block_size < 4);
- for (size_t n = 0; n < num_of_block_size; ++n)
- {
- const int32_t *from = reinterpret_cast<const int32_t *>(block_size_base) + n;
- int32_t *into = reinterpret_cast<int32_t *>(
- tensor.ptr_to_element({ToARMComputeAxis(rank, n + 1).value()}));
- *into = *from;
- }
- };
- _builder.addInitializer(block_size_index, initializer);
+ { // Append block_size initializer
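+ // NOTE The NNAPI block size is ordered (height, width), while the ACL
+ //      tensor uses reversed axis order, so it is written as (width, height).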
+ auto initializer = [block_size_base](::arm_compute::ITensor &tensor) {
+ const auto block_size_y = block_size_base[0];
+ const auto block_size_x = block_size_base[1];
- break;
- }
- default:
- {
- throw std::runtime_error("Not supported");
- }
- }
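+ // ptr_to_element({0}) points at the first element of the 1-D tensor,
+ // and the two int32_t entries are contiguous along the x axis.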
+ auto into = reinterpret_cast<int32_t *>(tensor.ptr_to_element({0}));
+ into[0] = block_size_x;
+ into[1] = block_size_y;
+ };
+ _builder.addInitializer(block_size_index, initializer);
}
- if (_ctx.at(padding_size_index).hasData())
- {
- const auto padding_size_shape = _ctx.at(padding_size_index).shape();
- const auto rank = _ctx.at(input_index).shape().rank();
- auto padding_size_base = _ctx.at(padding_size_index).data().base();
- auto padding_size_type = _ctx.at(padding_size_index).type();
-
- switch (padding_size_type)
- {
- case ANEURALNETWORKS_TENSOR_INT32:
+ { // Append padding_size initializer
+ auto initializer = [padding_size_base, padding_size_shape](::arm_compute::ITensor &tensor) {
+ // If n == 0, then the axis is the height
+ // If n == 1, then the axis is the width
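+ // The row index is flipped ({0, 1 - n} below) to match ACL's reversed
+ // axis order: the height pads (n == 0) land in ACL row 1.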
+ for (size_t n = 0; n < padding_size_shape.dim(0); ++n)
{
- auto initializer = [padding_size_base, padding_size_shape,
- rank](::arm_compute::ITensor &tensor) {
- assert(padding_size_shape.dim(1) == 2);
- assert(padding_size_shape.dim(0) < 4);
- for (size_t n = 0; n < padding_size_shape.dim(0); ++n)
- {
- const int32_t *from = reinterpret_cast<const int32_t *>(padding_size_base) +
- (n * padding_size_shape.dim(1));
- int32_t *into = reinterpret_cast<int32_t *>(
- tensor.ptr_to_element({0, ToARMComputeAxis(rank, n + 1).value()}));
- into[0] = from[0];
- into[1] = from[1];
- }
- };
- _builder.addInitializer(padding_size_index, initializer);
- break;
+ const auto from = padding_size_base + (n * padding_size_shape.dim(1));
+ auto into = reinterpret_cast<int32_t *>(tensor.ptr_to_element({0, 1 - n}));
+ into[0] = from[0];
+ into[1] = from[1];
}
- default:
- {
- throw std::runtime_error("Not supported");
- }
- }
+ };
+ _builder.addInitializer(padding_size_index, initializer);
}
// Construct operation parameters