From e711ec0f4eababdde97004c2e85454724326bbf6 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=9E=A5=EC=A7=80=EC=84=AD/On-Device=20Lab=28SR=29/Enginee?=
 =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Thu, 4 Apr 2019 18:21:17 +0900
Subject: [PATCH] [PACL] Fix wrong setting for SpaceToBatchND in NHWC (#4366)

This commit fixes the wrong setting of block_size and padding_size for
SpaceToBatchND in NHWC.
- Change their setting to support only 4-dimensional input with a
  2-dimensional spatial shape.

Signed-off-by: jiseob.jang
---
 runtimes/pure_arm_compute/src/compilation.cc | 113 ++++++++++-----------------
 1 file changed, 43 insertions(+), 70 deletions(-)

diff --git a/runtimes/pure_arm_compute/src/compilation.cc b/runtimes/pure_arm_compute/src/compilation.cc
index d028e01..ae73840 100644
--- a/runtimes/pure_arm_compute/src/compilation.cc
+++ b/runtimes/pure_arm_compute/src/compilation.cc
@@ -4342,25 +4342,39 @@ void Planner::visit(const ::internal::tflite::op::SpaceToBatchND::Node &node)
   const ::internal::tflite::operand::Index block_size_index{node.param().block_size_index};
   const ::internal::tflite::operand::Index padding_size_index{node.param().padding_size_index};
 
+  const auto &output_shape = _ctx.at(output_index).shape();
+  const auto &input_shape = _ctx.at(input_index).shape();
+  const auto &padding_size_shape = _ctx.at(padding_size_index).shape();
+  auto block_size_base = reinterpret_cast<const int32_t *>(_ctx.at(block_size_index).data().base());
+  auto padding_size_base =
+      reinterpret_cast<const int32_t *>(_ctx.at(padding_size_index).data().base());
+
+  { // New block for assertions
+    const auto &block_size_shape = _ctx.at(block_size_index).shape();
   // Currently, only 4D NHWC input/output op_context are supported.
   // The 4D array need to have exactly 2 spatial dimensions.
   // TODO: Support arbitrary dimension in SpaceToBatchND.
-  assert(_ctx.at(input_index).shape().rank() == 4);
-  assert(_ctx.at(output_index).shape().rank() == 4);
-  assert(_ctx.at(block_size_index).shape().rank() == 1);
-  assert(_ctx.at(padding_size_index).shape().rank() == 2);
-
-  const auto &output_shape = _ctx.at(output_index).shape();
-  const auto &input_shape = _ctx.at(input_index).shape();
-  const auto &block_size_shape = _ctx.at(block_size_index).shape();
-  const auto &padding_size_shape = _ctx.at(padding_size_index).shape();
+    assert(input_shape.rank() == 4);
+    assert(output_shape.rank() == 4);
+    assert(block_size_shape.rank() == 1);
+    assert(padding_size_shape.rank() == 2);
 
   assert(output_shape.dim(3) == input_shape.dim(3));
   assert(block_size_shape.dim(0) == 2);
   assert(padding_size_shape.dim(0) == 2);
   assert(padding_size_shape.dim(1) == 2);
+
+    assert(_ctx.at(block_size_index).hasData() && _ctx.at(padding_size_index).hasData());
+    assert(_ctx.at(block_size_index).type() == ANEURALNETWORKS_TENSOR_INT32);
+    assert(_ctx.at(padding_size_index).type() == ANEURALNETWORKS_TENSOR_INT32);
+
+    assert(block_size_base[0] > 0 && block_size_base[1] > 0);
+    assert(output_shape.dim(0) == input_shape.dim(0) * block_size_base[0] * block_size_base[1]);
+    assert(output_shape.dim(1) ==
+           (input_shape.dim(1) + padding_size_base[0] + padding_size_base[1]) / block_size_base[0]);
+    assert(output_shape.dim(2) ==
+           (input_shape.dim(2) + padding_size_base[2] + padding_size_base[3]) / block_size_base[1]);
+  }
 
   // Set Shape Constraints and TensorInfo
@@ -4385,72 +4399,31 @@ void Planner::visit(const ::internal::tflite::op::SpaceToBatchND::Node &node)
                                        _ctx.at(padding_size_index).scale(),
                                        _ctx.at(padding_size_index).zeroPoint()));
 
-  if (_ctx.at(block_size_index).hasData())
-  {
-    const auto rank = _ctx.at(input_index).shape().rank();
-    const auto num_of_block_size = _ctx.at(block_size_index).shape().asVector();
-    auto block_size_base = _ctx.at(block_size_index).data().base();
-    auto block_size_type = _ctx.at(block_size_index).type();
-
-    switch (block_size_type)
-    {
-      case ANEURALNETWORKS_TENSOR_INT32:
-      {
-        auto initializer = [block_size_base, num_of_block_size,
-                            rank](::arm_compute::ITensor &tensor) {
-          assert(num_of_block_size < 4);
-          for (size_t n = 0; n < num_of_block_size; ++n)
-          {
-            const int32_t *from = reinterpret_cast<const int32_t *>(block_size_base) + n;
-            int32_t *into = reinterpret_cast<int32_t *>(
-                tensor.ptr_to_element({ToARMComputeAxis(rank, n + 1).value()}));
-            *into = *from;
-          }
-        };
-        _builder.addInitializer(block_size_index, initializer);
+  { // Append block_size initializer
+    auto initializer = [block_size_base](::arm_compute::ITensor &tensor) {
+      const auto block_size_y = block_size_base[0];
+      const auto block_size_x = block_size_base[1];
 
-        break;
-      }
-      default:
-      {
-        throw std::runtime_error("Not supported");
-      }
-    }
+      auto into = reinterpret_cast<int32_t *>(tensor.ptr_to_element({0}));
+      into[0] = block_size_x;
+      into[1] = block_size_y;
+    };
+    _builder.addInitializer(block_size_index, initializer);
   }
 
-  if (_ctx.at(padding_size_index).hasData())
-  {
-    const auto padding_size_shape = _ctx.at(padding_size_index).shape();
-    const auto rank = _ctx.at(input_index).shape().rank();
-    auto padding_size_base = _ctx.at(padding_size_index).data().base();
-    auto padding_size_type = _ctx.at(padding_size_index).type();
-
-    switch (padding_size_type)
-    {
-      case ANEURALNETWORKS_TENSOR_INT32:
+  { // Append padding_size initializer
+    auto initializer = [padding_size_base, padding_size_shape](::arm_compute::ITensor &tensor) {
+      // If n == 0, then the axis is the height
+      // If n == 1, then the axis is the width
+      for (size_t n = 0; n < padding_size_shape.dim(0); ++n)
       {
-        auto initializer = [padding_size_base, padding_size_shape,
-                            rank](::arm_compute::ITensor &tensor) {
-          assert(padding_size_shape.dim(1) == 2);
-          assert(padding_size_shape.dim(0) < 4);
-          for (size_t n = 0; n < padding_size_shape.dim(0); ++n)
-          {
-            const int32_t *from = reinterpret_cast<const int32_t *>(padding_size_base) +
-                                  (n * padding_size_shape.dim(1));
-            int32_t *into = reinterpret_cast<int32_t *>(
-                tensor.ptr_to_element({0, ToARMComputeAxis(rank, n + 1).value()}));
-            into[0] = from[0];
-            into[1] = from[1];
-          }
-        };
-        _builder.addInitializer(padding_size_index, initializer);
-        break;
+        const auto from = padding_size_base + (n * padding_size_shape.dim(1));
+        auto into = reinterpret_cast<int32_t *>(tensor.ptr_to_element({0, 1 - n}));
+        into[0] = from[0];
+        into[1] = from[1];
       }
-      default:
-      {
-        throw std::runtime_error("Not supported");
-      }
-    }
+    };
+    _builder.addInitializer(padding_size_index, initializer);
   }
 
   // Construct operation parameters
-- 
2.7.4
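
For reference, the index mapping the new initializers apply can be reproduced with plain arrays. The sketch below is illustrative only and is not part of the patch; the example values are made up and no arm_compute types are used. It mirrors what the diff shows: block_size arrives as [height, width] and the paddings as one row per spatial axis (row 0 = height, row 1 = width), while the initializers store the width factor first and write padding row n at row 1 - n.

#include <cassert>
#include <cstdint>
#include <iostream>

int main()
{
  // Hypothetical operand data, in the order the NN API model provides it.
  const int32_t block_size[2] = {2, 3};    // [block_height, block_width]
  const int32_t paddings[2][2] = {{1, 1},  // height: {top, bottom}
                                  {0, 2}}; // width:  {left, right}

  // block_size initializer: element 0 takes the width factor, element 1 the height factor,
  // matching into[0] = block_size_x; into[1] = block_size_y; in the patch.
  int32_t acl_block_size[2];
  acl_block_size[0] = block_size[1]; // x (width)
  acl_block_size[1] = block_size[0]; // y (height)

  // padding initializer: operand row n (0 = height, 1 = width) is written to row 1 - n,
  // keeping the {before, after} pair order, matching ptr_to_element({0, 1 - n}).
  int32_t acl_paddings[2][2];
  for (int n = 0; n < 2; ++n)
  {
    acl_paddings[1 - n][0] = paddings[n][0];
    acl_paddings[1 - n][1] = paddings[n][1];
  }

  assert(acl_block_size[0] == 3 && acl_block_size[1] == 2);
  assert(acl_paddings[0][0] == 0 && acl_paddings[1][1] == 1);
  std::cout << "block size as stored (x, y): " << acl_block_size[0] << ", "
            << acl_block_size[1] << "\n";
  return 0;
}

With block_size {2, 3}, this stores {3, 2} at element offsets 0 and 1, and the height paddings end up in the second row, which is the layout the patched initializers produce.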