From: 오형석 (Hyeongseok Oh)/On-Device Lab(SR)/Staff Engineer/Samsung Electronics
Date: Fri, 2 Aug 2019 07:38:57 +0000 (+0900)
Subject: [neurun] Use shape inference in interpreter (#6012)
X-Git-Tag: submit/tizen/20190809.050447~220
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d64a04904dab9fc36a628a3f3a78dabc22d0dab4;p=platform%2Fcore%2Fml%2Fnnfw.git

[neurun] Use shape inference in interpreter (#6012)

* Use shape inference in interpreter

  To support dynamic shapes, use shape inference for convolution, maxpool,
  and averagepool.

Signed-off-by: Hyeongseok Oh

* Fix unspecified shape checking and shape inference for depthwise conv
---

diff --git a/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc b/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
index 886eb15..ab612b0 100644
--- a/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
+++ b/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
@@ -22,6 +22,7 @@
 #include "model/operation/AvgPool2DNode.h"
 #include "util/Utils.h"
 #include "util/Padding.h"
+#include "util/ShapeInference.h"
 #include "misc/polymorphic_downcast.h"
 
 namespace neurun
@@ -43,11 +44,20 @@ void prepareAvgPool2D(ExecEnv *env, const model::Operation &node)
 
   assert(in_tensor->num_dimensions() == 4);
 
-  // TODO handle unspecified output shape:
-  //      calculate output shape using ifm shape, kernel width/height, padding, stride
   const auto output_info = env->model().operands.at(out_index).info();
-  assert(output_info.total_size() != 0);
-  env->allocateIfNeeded(out_index, output_info);
+  if (output_info.total_size() == 0)
+  {
+    // Handle unspecified output shape
+    const auto &avgpool_node =
+        nnfw::misc::polymorphic_downcast<const model::operation::AvgPool2DNode &>(node);
+    const auto infered_output_shapes =
+        shape_inference::inferAvgPoolShape(in_tensor->tensorInfo().shape(), avgpool_node.param());
+    env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+  }
+  else
+  {
+    env->allocateIfNeeded(out_index, output_info);
+  }
 
   auto out_tensor = env->tensorAt(out_index);
   UNUSED_RELEASE(out_tensor);
diff --git a/runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc b/runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc
index 9847d2a..bc4eaab 100644
--- a/runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc
+++ b/runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc
@@ -22,6 +22,7 @@
 #include "model/operation/Conv2DNode.h"
 #include "util/Utils.h"
 #include "util/Padding.h"
+#include "util/ShapeInference.h"
 #include "misc/polymorphic_downcast.h"
 
 namespace neurun
@@ -52,11 +53,20 @@ void prepareConv2D(ExecEnv *env, const model::Operation &node)
   UNUSED_RELEASE(kernel_tensor);
   UNUSED_RELEASE(bias_tensor);
 
-  // TODO handle unspecified output shape:
-  //      calculate output shape using ifm shape, kernel shape, padding, stride
   const auto output_info = env->model().operands.at(out_index).info();
-  assert(output_info.total_size() != 0);
-  env->allocateIfNeeded(out_index, output_info);
+  if (output_info.total_size() == 0)
+  {
+    // Handle unspecified output shape
+    const auto &conv_node =
+        nnfw::misc::polymorphic_downcast<const model::operation::Conv2DNode &>(node);
+    const auto infered_output_shapes = shape_inference::inferConv2DShape(
+        in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(), conv_node.param());
+    env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+  }
+  else
+  {
+    env->allocateIfNeeded(out_index, output_info);
+  }
 
   auto out_tensor = env->tensorAt(out_index);
   UNUSED_RELEASE(out_tensor);
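For reference, the inferConv2DShape / inferAvgPoolShape calls introduced above reduce, per spatial dimension, to the standard convolution/pooling output-size arithmetic. Below is a minimal C++ sketch of that arithmetic; the function names are hypothetical, and the real implementations live in util/ShapeInference.h, whose signatures and layout handling may differ.

    #include <cstdint>

    // VALID padding: out = floor((in - kernel) / stride) + 1, which for
    // positive values equals (in - kernel + stride) / stride under
    // integer division.
    int32_t outSizeValid(int32_t in, int32_t kernel, int32_t stride)
    {
      return (in - kernel + stride) / stride;
    }

    // SAME padding: out = ceil(in / stride), independent of kernel size.
    int32_t outSizeSame(int32_t in, int32_t stride)
    {
      return (in + stride - 1) / stride;
    }

For example, a 224x224 input with a 3x3 kernel and stride 2 under VALID padding gives outSizeValid(224, 3, 2) == 111 per spatial dimension.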
diff --git a/runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc b/runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc
index b728151..df327ce 100644
--- a/runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc
+++ b/runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc
@@ -15,6 +15,7 @@
  */
 
 #include <cker/operation/DepthwiseConv.h>
+#include <misc/polymorphic_downcast.h>
 
 #include "OperationUtil.h"
 
@@ -22,6 +23,7 @@
 #include "model/operation/DepthwiseConv2DNode.h"
 #include "util/Padding.h"
 #include "util/Utils.h"
+#include "util/ShapeInference.h"
 
 namespace neurun
 {
@@ -55,8 +57,20 @@ void prepareDepthwiseConv(ExecEnv *env, const model::Operation &node)
   // TODO handle unspecified output shape:
   //      calculate output shape using ifm shape, kernel shape, padding, stride
   const auto output_info = env->model().operands.at(out_index).info();
-  assert(output_info.total_size() != 0);
-  env->allocateIfNeeded(out_index, output_info);
+  if (output_info.total_size() == 0)
+  {
+    // Handle unspecified output shape
+    const auto &depth_conv_node =
+        nnfw::misc::polymorphic_downcast<const model::operation::DepthwiseConv2DNode &>(node);
+    const auto infered_output_shapes = shape_inference::inferDepthwiseConv2DShape(
+        in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(),
+        depth_conv_node.param());
+    env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+  }
+  else
+  {
+    env->allocateIfNeeded(out_index, output_info);
+  }
 
   auto out_tensor = env->tensorAt(out_index);
   UNUSED_RELEASE(out_tensor);
diff --git a/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc b/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc
index 8f49852..6b91ed0 100644
--- a/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc
+++ b/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc
@@ -22,6 +22,7 @@
 #include "model/operation/MaxPool2DNode.h"
 #include "util/Utils.h"
 #include "util/Padding.h"
+#include "util/ShapeInference.h"
 #include "misc/polymorphic_downcast.h"
 
 namespace neurun
@@ -43,11 +44,20 @@ void prepareMaxPool2D(ExecEnv *env, const model::Operation &node)
   assert(in_tensor->num_dimensions() == 4);
   UNUSED_RELEASE(in_tensor);
 
-  // TODO handle unspecified output shape:
-  //      calculate output shape using ifm shape, kernel width/height, padding, stride
   const auto output_info = env->model().operands.at(out_index).info();
-  assert(output_info.total_size() != 0);
-  env->allocateIfNeeded(out_index, output_info);
+  if (output_info.total_size() == 0)
+  {
+    // Handle unspecified output shape
+    const auto &maxpool_node =
+        nnfw::misc::polymorphic_downcast<const model::operation::MaxPool2DNode &>(node);
+    const auto infered_output_shapes =
+        shape_inference::inferMaxPoolShape(in_tensor->tensorInfo().shape(), maxpool_node.param());
+    env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+  }
+  else
+  {
+    env->allocateIfNeeded(out_index, output_info);
+  }
 
   auto out_tensor = env->tensorAt(out_index);
   UNUSED_RELEASE(out_tensor);
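The depthwise fix above differs from ordinary convolution shape inference only in the channel dimension: a depthwise filter produces `multiplier` output channels per input channel rather than an arbitrary filter count. A hedged sketch, assuming NHWC layout and VALID padding; the Shape4 alias and the function name are illustrative, not the real neurun API.

    #include <array>
    #include <cstdint>

    using Shape4 = std::array<int32_t, 4>; // {N, H, W, C}, NHWC layout

    Shape4 inferDepthwiseOutValid(const Shape4 &ifm, int32_t kh, int32_t kw,
                                  int32_t stride_h, int32_t stride_w,
                                  int32_t multiplier)
    {
      // Spatial dims follow the same VALID-padding rule as ordinary conv:
      // out = floor((in - kernel) / stride) + 1.
      const int32_t out_h = (ifm[1] - kh + stride_h) / stride_h;
      const int32_t out_w = (ifm[2] - kw + stride_w) / stride_w;
      // Depthwise conv applies `multiplier` filters per input channel, so
      // the output depth is C_in * multiplier.
      return {ifm[0], out_h, out_w, ifm[3] * multiplier};
    }

As the diff shows, the actual shape_inference::inferDepthwiseConv2DShape also takes the kernel shape and the node's padding/stride params; the sketch only illustrates how the output depth is derived.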