[neurun] Use shape inference in interpreter (#6012)
author Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Fri, 2 Aug 2019 07:38:57 +0000 (16:38 +0900)
committer Chunseok Lee/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <chunseok.lee@samsung.com>
Fri, 2 Aug 2019 07:38:57 +0000 (16:38 +0900)
* Use shape inference in interpreter

To support dynamic shapes, use shape inference in the interpreter for convolution, max pool, and average pool.

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
* Fix unspecified output shape checking and add shape inference for depthwise conv
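
For context, the heart of the change is the output-size arithmetic performed by the new shape_inference helpers (inferAvgPoolShape, inferMaxPoolShape, inferConv2DShape, inferDepthwiseConv2DShape). A minimal sketch of that arithmetic, assuming the usual NNAPI/TFLite SAME/VALID padding conventions rather than the actual contents of util/ShapeInference.h:

#include <cstdint>

enum class PaddingType { SAME, VALID };

// Output extent of one spatial dimension of a pool/conv window.
// SAME pads so that out == ceil(in / stride); VALID keeps only fully
// covered windows: out == floor((in - kernel) / stride) + 1.
inline int32_t outDim(int32_t in, int32_t kernel, int32_t stride, PaddingType pad)
{
  if (pad == PaddingType::SAME)
    return (in + stride - 1) / stride;
  return (in - kernel + stride) / stride;
}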

runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc
runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc
runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc

runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc
index 886eb15..ab612b0 100644
@@ -22,6 +22,7 @@
 #include "model/operation/AvgPool2DNode.h"
 #include "util/Utils.h"
 #include "util/Padding.h"
+#include "util/ShapeInference.h"
 #include "misc/polymorphic_downcast.h"
 
 namespace neurun
@@ -43,11 +44,20 @@ void prepareAvgPool2D(ExecEnv *env, const model::Operation &node)
 
   assert(in_tensor->num_dimensions() == 4);
 
-  // TODO handle unspecified output shape:
-  //      calculate output shape using ifm shape, kernel width/height, padding, stride
   const auto output_info = env->model().operands.at(out_index).info();
-  assert(output_info.total_size() != 0);
-  env->allocateIfNeeded(out_index, output_info);
+  if (output_info.total_size() == 0)
+  {
+    // Handle unspecified output shape
+    const auto &avgpool_node =
+        nnfw::misc::polymorphic_downcast<const model::operation::AvgPool2DNode &>(node);
+    const auto infered_output_shapes =
+        shape_inference::inferAvgPoolShape(in_tensor->tensorInfo().shape(), avgpool_node.param());
+    env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+  }
+  else
+  {
+    env->allocateIfNeeded(out_index, output_info);
+  }
 
   auto out_tensor = env->tensorAt(out_index);
   UNUSED_RELEASE(out_tensor);
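
Applied to the pooling case above: a hypothetical equivalent of what shape_inference::inferAvgPoolShape computes, reusing outDim() from the sketch in the commit message and assuming NHWC layout (the names Shape4D and inferPool2D are illustrative, not from the codebase):

struct Shape4D { int32_t n, h, w, c; };  // batch, height, width, channels

// Pooling windows touch only the spatial dims; N and C pass through.
Shape4D inferPool2D(const Shape4D &in, int32_t k_h, int32_t k_w,
                    int32_t stride_h, int32_t stride_w, PaddingType pad)
{
  return {in.n,
          outDim(in.h, k_h, stride_h, pad),
          outDim(in.w, k_w, stride_w, pad),
          in.c};
}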
runtimes/neurun/core/src/exec/interp/operations/Conv2D.cc
index 9847d2a..bc4eaab 100644
@@ -22,6 +22,7 @@
 #include "model/operation/Conv2DNode.h"
 #include "util/Utils.h"
 #include "util/Padding.h"
+#include "util/ShapeInference.h"
 #include "misc/polymorphic_downcast.h"
 
 namespace neurun
@@ -52,11 +53,20 @@ void prepareConv2D(ExecEnv *env, const model::Operation &node)
   UNUSED_RELEASE(kernel_tensor);
   UNUSED_RELEASE(bias_tensor);
 
-  // TODO handle unspecified output shape:
-  //      calculate output shape using ifm shape, kernel shape, padding, stride
   const auto output_info = env->model().operands.at(out_index).info();
-  assert(output_info.total_size() != 0);
-  env->allocateIfNeeded(out_index, output_info);
+  if (output_info.total_size() == 0)
+  {
+    // Handle unspecified output shape
+    const auto &conv_node =
+        nnfw::misc::polymorphic_downcast<const model::operation::Conv2DNode &>(node);
+    const auto infered_output_shapes = shape_inference::inferConv2DShape(
+        in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(), conv_node.param());
+    env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+  }
+  else
+  {
+    env->allocateIfNeeded(out_index, output_info);
+  }
 
   auto out_tensor = env->tensorAt(out_index);
   UNUSED_RELEASE(out_tensor);
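
For convolution, the inferred shape also depends on the kernel tensor. A hypothetical sketch of the geometry inferConv2DShape computes, reusing Shape4D and outDim() from the sketches above and assuming an NHWC input with an NNAPI-style OHWI kernel [out_ch, k_h, k_w, in_ch]; the real helper may differ:

Shape4D inferConv2D(const Shape4D &in, const Shape4D &kernel,
                    int32_t stride_h, int32_t stride_w, PaddingType pad)
{
  // Spatial dims use the same window arithmetic as pooling; the channel
  // count comes from the kernel's leading (output-channel) dimension.
  return {in.n,
          outDim(in.h, kernel.h, stride_h, pad),  // kernel.h is k_h
          outDim(in.w, kernel.w, stride_w, pad),  // kernel.w is k_w
          kernel.n};                              // kernel.n is out_ch
}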
runtimes/neurun/core/src/exec/interp/operations/DepthwiseConv.cc
index b728151..df327ce 100644
@@ -15,6 +15,7 @@
  */
 
 #include <cker/operation/DepthwiseConv.h>
+#include <misc/polymorphic_downcast.h>
 
 #include "OperationUtil.h"
 
@@ -22,6 +23,7 @@
 #include "model/operation/DepthwiseConv2DNode.h"
 #include "util/Padding.h"
 #include "util/Utils.h"
+#include "util/ShapeInference.h"
 
 namespace neurun
 {
@@ -55,8 +57,18 @@ void prepareDepthwiseConv(ExecEnv *env, const model::Operation &node)
-  // TODO handle unspecified output shape:
-  //      calculate output shape using ifm shape, kernel shape, padding, stride
   const auto output_info = env->model().operands.at(out_index).info();
-  assert(output_info.total_size() != 0);
-  env->allocateIfNeeded(out_index, output_info);
+  if (output_info.total_size() == 0)
+  {
+    // Handle unspecified output shape
+    const auto &depth_conv_node =
+        nnfw::misc::polymorphic_downcast<const model::operation::DepthwiseConv2DNode &>(node);
+    const auto infered_output_shapes = shape_inference::inferDepthwiseConv2DShape(
+        in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(),
+        depth_conv_node.param());
+    env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+  }
+  else
+  {
+    env->allocateIfNeeded(out_index, output_info);
+  }
 
   auto out_tensor = env->tensorAt(out_index);
   UNUSED_RELEASE(out_tensor);
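
Depthwise convolution differs only in the channel dimension: NNAPI lays a depthwise kernel out as [1, k_h, k_w, in_ch * multiplier], so the output channel count is read from the kernel's last dimension. Again a sketch under that layout assumption:

Shape4D inferDepthwiseConv2D(const Shape4D &in, const Shape4D &kernel,
                             int32_t stride_h, int32_t stride_w, PaddingType pad)
{
  return {in.n,
          outDim(in.h, kernel.h, stride_h, pad),
          outDim(in.w, kernel.w, stride_w, pad),
          kernel.c};  // in_ch * channel multiplier
}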
runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc
index 8f49852..6b91ed0 100644
@@ -22,6 +22,7 @@
 #include "model/operation/MaxPool2DNode.h"
 #include "util/Utils.h"
 #include "util/Padding.h"
+#include "util/ShapeInference.h"
 #include "misc/polymorphic_downcast.h"
 
 namespace neurun
@@ -43,11 +44,20 @@ void prepareMaxPool2D(ExecEnv *env, const model::Operation &node)
   assert(in_tensor->num_dimensions() == 4);
   UNUSED_RELEASE(in_tensor);
 
-  // TODO handle unspecified output shape:
-  //      calculate output shape using ifm shape, kernel width/height, padding, stride
   const auto output_info = env->model().operands.at(out_index).info();
-  assert(output_info.total_size() != 0);
-  env->allocateIfNeeded(out_index, output_info);
+  if (output_info.total_size() == 0)
+  {
+    // Handle unspecified output shape
+    const auto &maxpool_node =
+        nnfw::misc::polymorphic_downcast<const model::operation::MaxPool2DNode &>(node);
+    const auto infered_output_shapes =
+        shape_inference::inferMaxPoolShape(in_tensor->tensorInfo().shape(), maxpool_node.param());
+    env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+  }
+  else
+  {
+    env->allocateIfNeeded(out_index, output_info);
+  }
 
   auto out_tensor = env->tensorAt(out_index);
   UNUSED_RELEASE(out_tensor);
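
Max pooling shares its geometry with average pooling, so the inferPool2D sketch above applies unchanged. A quick worked check with illustrative values (not taken from the diff):

#include <cassert>

int main()
{
  // 1x8x8x3 input, 2x2 window, stride 2, VALID padding.
  const Shape4D out = inferPool2D({1, 8, 8, 3}, 2, 2, 2, 2, PaddingType::VALID);
  assert(out.n == 1 && out.h == 4 && out.w == 4 && out.c == 3);  // (8-2+2)/2 == 4
  // This 1x4x4x3 result is what the new prepareMaxPool2D path would
  // allocate when output_info.total_size() == 0.
  return 0;
}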