From c7ad62faf9e95bc6a8a9cea6affc24ca4250a74e Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?=
 =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
 <hseok82.oh@samsung.com>
Date: Wed, 5 Jun 2019 19:17:49 +0900
Subject: [PATCH] [Interp] Introduce average pool operation in interpreter
 (#5357)

Introduce average pool operation for float type in interpreter

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
---
 .../core/src/exec/interp/operations/AvgPool2D.cc   | 86 +++++++++++++++++++++-
 1 file changed, 85 insertions(+), 1 deletion(-)

diff --git a/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc b/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
index c65a6a7..9afb1b3 100644
--- a/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
+++ b/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
@@ -14,7 +14,14 @@
  * limitations under the License.
  */
 
+#include <cker/operation/AveragePool.h>
+
+#include "OperationUtil.h"
+
 #include "exec/interp/Registration.h"
+#include "model/operation/AvgPool2DNode.h"
+#include "util/Utils.h"
+#include "util/Padding.h"
 
 namespace neurun
 {
@@ -22,10 +29,87 @@ namespace exec
 {
 namespace interp
 {
+namespace avgpool2d
+{
+
+void prepareAvgPool2D(ExecEnv *env, const model::Operation &node)
+{
+  auto maxpool_node = reinterpret_cast<const model::operation::AvgPool2DNode &>(node);
+
+  const auto in_index = node.getInputs().at(0);
+  const auto out_index = node.getOutputs().at(0);
+
+  const auto in_tensor = env->tensorAt(in_index);
+  UNUSED_RELEASE(in_tensor);
+
+  assert(in_tensor->num_dimensions() == 4);
+
+  // TODO handle unspecified output shape:
+  //      calculate output shape using ifm shape, kernel width/height, padding, stride
+  const auto output_info = env->model().operands.at(out_index).info();
+  assert(output_info.total_size() != 0);
+  env->allocateIfNeeded(out_index, output_info);
+
+  auto out_tensor = env->tensorAt(out_index);
+  UNUSED_RELEASE(out_tensor);
+
+  // Handle same ifm & ofm data type only
+  assert(in_tensor->data_type() == out_tensor->data_type());
+  assert(out_tensor->num_dimensions() == 4);
+}
+
+void invoke(const ITensor *in_tensor, const ITensor *out_tensor,
+            const model::operation::AvgPool2DNode::Param &param)
+{
+  const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature();
+  const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature();
+  const auto padding = neurun::util::calculatePadding(param.padding, ifm_shape, ofm_shape,
+                                                      param.stride, param.kw, param.kh);
+  // Calculate
+  nnfw::cker::AveragePoolParams cker_param;
+  calculateActivationRange(param.activation, &cker_param.float_activation_min,
+                           &cker_param.float_activation_max);
+  cker_param.filter_width = param.kw;
+  cker_param.filter_height = param.kh;
+  cker_param.padding_values.width = padding.left;
+  cker_param.padding_values.height = padding.top;
+  cker_param.stride_width = param.stride.horizontal;
+  cker_param.stride_height = param.stride.vertical;
+
+  const auto in_shape = convertShape(in_tensor->tensorInfo().shape());
+  const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
+  const float *in_ptr = reinterpret_cast<const float *>(in_tensor->bufferRO());
+  float *out_ptr = reinterpret_cast<float *>(out_tensor->buffer());
+
+  nnfw::cker::AveragePool(cker_param, in_shape, in_ptr, out_shape, out_ptr);
+}
+
+void invokeAvgPool2D(const ExecEnv *env, const model::Operation &node)
+{
+  auto maxpool_node = reinterpret_cast<const model::operation::AvgPool2DNode &>(node);
+
+  const auto in_index = node.getInputs().at(0);
+  const auto out_index = node.getOutputs().at(0);
+
+  // Check lhs shape is same with rhs (with broadcast)
+  const auto in_tensor = env->tensorAt(in_index);
+  const auto out_tensor = env->tensorAt(out_index);
+
+  const auto data_type = in_tensor->data_type();
+  if (data_type == model::DataType::FLOAT32)
+  {
+    invoke(in_tensor, out_tensor, maxpool_node.param());
+  }
+  else
+  {
+    throw std::runtime_error{"NYI: Support float only"};
+  }
+}
+} // namespace maxpool2d
 
 OpKernel *getAvgPool2DNode()
 {
-  static OpKernel kernel = {nullptr, nullptr};
+  static OpKernel kernel = {avgpool2d::prepareAvgPool2D, avgpool2d::invokeAvgPool2D};
   return &kernel;
 }
 
-- 
2.7.4