Unify asTensorShape() and asTensorInfo() into single functions (#2471)
author Jiseob Jang/Motion Control Lab(SR)/Engineer/Samsung Electronics <jiseob.jang@samsung.com>
Thu, 30 Aug 2018 22:23:31 +0000 (07:23 +0900)
committer Saehie Park/Motion Control Lab(SR)/Principal Engineer/Samsung Electronics <saehie.park@samsung.com>
Thu, 30 Aug 2018 22:23:31 +0000 (07:23 +0900)
* Unify asTensorShape() and asTensorInfo() into single functions

This commit unifies asTensorShape() and asTensorInfo() so that each is a single function with no overloads.
- Take asTensorShape() out of asTensorInfo() so that dimension correction can be applied conditionally at the call site.
- Use one function per conversion to eliminate unnecessary complexity, as sketched below.
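
For illustration, a minimal sketch of the resulting call pattern, assembled from the hunks below (every name in it, e.g. asTensorShape(), asTensorInfo(), _builder, _ctx, already exists in the pure_arm_compute sources; nothing new is introduced):

    // The only conversion helpers that remain in Cast.h after this change:
    //   ::arm_compute::TensorShape asTensorShape(const internal::tflite::operand::Shape &shape);
    //   ::arm_compute::TensorInfo asTensorInfo(const ::arm_compute::TensorShape &shape,
    //                                          const int32_t type, const float scale = 0.0f,
    //                                          const int32_t zeroPoint = 0);
    //
    // Every call site converts the operand shape explicitly and feeds the result
    // into the single asTensorInfo() overload, e.g. in Planner::visit():
    _builder.addShapeConstr(
        ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
                                _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));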

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
* Remove unused overloaded functions

This commit removes the now-unused overloads of:
 - asTensorInfo()
 - asTensorShape()

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
runtimes/pure_arm_compute/src/compilation.cc
runtimes/pure_arm_compute/src/internal/arm_compute/Cast.h

index 6958fd6..6912698 100644
--- a/runtimes/pure_arm_compute/src/compilation.cc
+++ b/runtimes/pure_arm_compute/src/compilation.cc
@@ -490,9 +490,9 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
 
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
@@ -503,12 +503,12 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
     const_cast<::internal::tflite::operand::Shape &>(_ctx.at(rhs_index).shape())
         .extendRank(broadcast_rank);
   }
-  _builder.addShapeConstr(lhs_index,
-                          asTensorInfo(_ctx.at(lhs_index).shape(), _ctx.at(lhs_index).type(),
-                                       _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
-  _builder.addShapeConstr(rhs_index,
-                          asTensorInfo(_ctx.at(rhs_index).shape(), _ctx.at(rhs_index).type(),
-                                       _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
+  _builder.addShapeConstr(
+      lhs_index, asTensorInfo(asTensorShape(_ctx.at(lhs_index).shape()), _ctx.at(lhs_index).type(),
+                              _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
+  _builder.addShapeConstr(
+      rhs_index, asTensorInfo(asTensorShape(_ctx.at(rhs_index).shape()), _ctx.at(rhs_index).type(),
+                              _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -589,9 +589,9 @@ void Planner::visit(const ::internal::tflite::op::Sub::Node &node)
   const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
 
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
 
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
@@ -602,12 +602,12 @@ void Planner::visit(const ::internal::tflite::op::Sub::Node &node)
     const_cast<::internal::tflite::operand::Shape &>(_ctx.at(rhs_index).shape())
         .extendRank(broadcast_rank);
   }
-  _builder.addShapeConstr(lhs_index,
-                          asTensorInfo(_ctx.at(lhs_index).shape(), _ctx.at(lhs_index).type(),
-                                       _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
-  _builder.addShapeConstr(rhs_index,
-                          asTensorInfo(_ctx.at(rhs_index).shape(), _ctx.at(rhs_index).type(),
-                                       _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
+  _builder.addShapeConstr(
+      lhs_index, asTensorInfo(asTensorShape(_ctx.at(lhs_index).shape()), _ctx.at(lhs_index).type(),
+                              _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
+  _builder.addShapeConstr(
+      rhs_index, asTensorInfo(asTensorShape(_ctx.at(rhs_index).shape()), _ctx.at(rhs_index).type(),
+                              _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -671,9 +671,9 @@ void Planner::visit(const ::internal::tflite::op::Mul::Node &node)
     assert(_ctx.at(ofm_index).scale() > _ctx.at(lhs_index).scale() * _ctx.at(rhs_index).scale());
   }
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
 
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
@@ -684,12 +684,12 @@ void Planner::visit(const ::internal::tflite::op::Mul::Node &node)
     const_cast<::internal::tflite::operand::Shape &>(_ctx.at(rhs_index).shape())
         .extendRank(broadcast_rank);
   }
-  _builder.addShapeConstr(lhs_index,
-                          asTensorInfo(_ctx.at(lhs_index).shape(), _ctx.at(lhs_index).type(),
-                                       _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
-  _builder.addShapeConstr(rhs_index,
-                          asTensorInfo(_ctx.at(rhs_index).shape(), _ctx.at(rhs_index).type(),
-                                       _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
+  _builder.addShapeConstr(
+      lhs_index, asTensorInfo(asTensorShape(_ctx.at(lhs_index).shape()), _ctx.at(lhs_index).type(),
+                              _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
+  _builder.addShapeConstr(
+      rhs_index, asTensorInfo(asTensorShape(_ctx.at(rhs_index).shape()), _ctx.at(rhs_index).type(),
+                              _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
 
   struct Param
   {
@@ -752,9 +752,9 @@ void Planner::visit(const ::internal::tflite::op::Div::Node &node)
   const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
 
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
 
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
@@ -766,12 +766,12 @@ void Planner::visit(const ::internal::tflite::op::Div::Node &node)
         .extendRank(broadcast_rank);
   }
 
-  _builder.addShapeConstr(lhs_index,
-                          asTensorInfo(_ctx.at(lhs_index).shape(), _ctx.at(lhs_index).type(),
-                                       _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
-  _builder.addShapeConstr(rhs_index,
-                          asTensorInfo(_ctx.at(rhs_index).shape(), _ctx.at(rhs_index).type(),
-                                       _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
+  _builder.addShapeConstr(
+      lhs_index, asTensorInfo(asTensorShape(_ctx.at(lhs_index).shape()), _ctx.at(lhs_index).type(),
+                              _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
+  _builder.addShapeConstr(
+      rhs_index, asTensorInfo(asTensorShape(_ctx.at(rhs_index).shape()), _ctx.at(rhs_index).type(),
+                              _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -847,18 +847,19 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Implicit::Node &node)
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(ofm_shape, _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(ifm_shape, _ctx.at(ifm_index).type(),
-                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
-  _builder.addShapeConstr(ker_index,
-                          asTensorInfo(ker_shape, _ctx.at(ker_index).type(),
-                                       _ctx.at(ker_index).scale(), _ctx.at(ker_index).zeroPoint()));
-  _builder.addShapeConstr(bias_index, asTensorInfo(bias_size, _ctx.at(bias_index).type(),
-                                                   _ctx.at(bias_index).scale(),
-                                                   _ctx.at(bias_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()), _ctx.at(ifm_index).type(),
+                              _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ker_index, asTensorInfo(asTensorShape(_ctx.at(ker_index).shape()), _ctx.at(ker_index).type(),
+                              _ctx.at(ker_index).scale(), _ctx.at(ker_index).zeroPoint()));
+  _builder.addShapeConstr(bias_index,
+                          asTensorInfo(asTensorShape(_ctx.at(bias_index).shape()),
+                                       _ctx.at(bias_index).type(), _ctx.at(bias_index).scale(),
+                                       _ctx.at(bias_index).zeroPoint()));
 
   // Set initializer for kernel
   {
@@ -991,11 +992,6 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Explicit::Node &node)
 
   const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
 
-  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
-  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
-  const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
-  const auto bias_size = _ctx.at(bias_index).shape().asVector();
-
   const int32_t padding_left = _ctx.at(padding_left_index).asScalar<int32_t>();
   const int32_t padding_right = _ctx.at(padding_right_index).asScalar<int32_t>();
   const int32_t padding_top = _ctx.at(padding_top_index).asScalar<int32_t>();
@@ -1008,23 +1004,25 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Explicit::Node &node)
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(ofm_shape, _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(ifm_shape, _ctx.at(ifm_index).type(),
-                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
-  _builder.addShapeConstr(ker_index,
-                          asTensorInfo(ker_shape, _ctx.at(ker_index).type(),
-                                       _ctx.at(ker_index).scale(), _ctx.at(ker_index).zeroPoint()));
-  _builder.addShapeConstr(bias_index, asTensorInfo(bias_size, _ctx.at(bias_index).type(),
-                                                   _ctx.at(bias_index).scale(),
-                                                   _ctx.at(bias_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()), _ctx.at(ifm_index).type(),
+                              _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ker_index, asTensorInfo(asTensorShape(_ctx.at(ker_index).shape()), _ctx.at(ker_index).type(),
+                              _ctx.at(ker_index).scale(), _ctx.at(ker_index).zeroPoint()));
+  _builder.addShapeConstr(bias_index,
+                          asTensorInfo(asTensorShape(_ctx.at(bias_index).shape()),
+                                       _ctx.at(bias_index).type(), _ctx.at(bias_index).scale(),
+                                       _ctx.at(bias_index).zeroPoint()));
 
   // Set initializer for kernel
   // Workaround for https://github.sec.samsung.net/STAR/nnfw/issues/2319
   if (_ctx.at(ker_index).hasData())
   {
+    const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
     auto ker_base = _ctx.at(ker_index).data().base();
     auto ker_size = _ctx.at(ker_index).data().size();
     auto ker_type = _ctx.at(ker_index).type();
@@ -1054,6 +1052,7 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Explicit::Node &node)
   // See above comment.
   if (_ctx.at(bias_index).hasData())
   {
+    const auto bias_size = _ctx.at(bias_index).shape().asVector();
     auto bias_base = _ctx.at(bias_index).data().base();
     auto bias_type = _ctx.at(bias_index).type();
 
@@ -1179,19 +1178,20 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(ofm_shape, _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(ifm_shape, _ctx.at(ifm_index).type(),
-                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()), _ctx.at(ifm_index).type(),
+                              _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
   // NOTE DepthwiseConv2D kernel is of shape [1, KER_W, KER_H, IFM_C * MULTIPLIER]
-  _builder.addShapeConstr(ker_index,
-                          asTensorInfo(ker_shape, _ctx.at(ker_index).type(),
-                                       _ctx.at(ker_index).scale(), _ctx.at(ker_index).zeroPoint()));
-  _builder.addShapeConstr(bias_index, asTensorInfo(bias_size, _ctx.at(bias_index).type(),
-                                                   _ctx.at(bias_index).scale(),
-                                                   _ctx.at(bias_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ker_index, asTensorInfo(asTensorShape(_ctx.at(ker_index).shape()), _ctx.at(ker_index).type(),
+                              _ctx.at(ker_index).scale(), _ctx.at(ker_index).zeroPoint()));
+  _builder.addShapeConstr(bias_index,
+                          asTensorInfo(asTensorShape(_ctx.at(bias_index).shape()),
+                                       _ctx.at(bias_index).type(), _ctx.at(bias_index).scale(),
+                                       _ctx.at(bias_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -1313,19 +1313,20 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Nod
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(ofm_shape, _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(ifm_shape, _ctx.at(ifm_index).type(),
-                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()), _ctx.at(ifm_index).type(),
+                              _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
   // NOTE DepthwiseConv2D kernel is of shape [1, KER_W, KER_H, IFM_C * MULTIPLIER]
-  _builder.addShapeConstr(ker_index,
-                          asTensorInfo(ker_shape, _ctx.at(ker_index).type(),
-                                       _ctx.at(ker_index).scale(), _ctx.at(ker_index).zeroPoint()));
-  _builder.addShapeConstr(bias_index, asTensorInfo(bias_size, _ctx.at(bias_index).type(),
-                                                   _ctx.at(bias_index).scale(),
-                                                   _ctx.at(bias_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ker_index, asTensorInfo(asTensorShape(_ctx.at(ker_index).shape()), _ctx.at(ker_index).type(),
+                              _ctx.at(ker_index).scale(), _ctx.at(ker_index).zeroPoint()));
+  _builder.addShapeConstr(bias_index,
+                          asTensorInfo(asTensorShape(_ctx.at(bias_index).shape()),
+                                       _ctx.at(bias_index).type(), _ctx.at(bias_index).scale(),
+                                       _ctx.at(bias_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -1417,16 +1418,15 @@ void Planner::visit(const ::internal::tflite::op::Dequantize::Node &node)
   assert(_ctx.at(input_index).type() == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
   assert(_ctx.at(output_index).type() == ANEURALNETWORKS_TENSOR_FLOAT32);
 
-  const auto output_shape = _ctx.at(output_index).shape();
-  const auto input_shape = _ctx.at(input_index).shape();
-
   // Set Shape Constraints
-  _builder.addShapeConstr(output_index, asTensorInfo(output_shape, _ctx.at(output_index).type(),
-                                                     _ctx.at(output_index).scale(),
-                                                     _ctx.at(output_index).zeroPoint()));
-  _builder.addShapeConstr(input_index, asTensorInfo(input_shape, _ctx.at(input_index).type(),
-                                                    _ctx.at(input_index).scale(),
-                                                    _ctx.at(input_index).zeroPoint()));
+  _builder.addShapeConstr(output_index,
+                          asTensorInfo(asTensorShape(_ctx.at(output_index).shape()),
+                                       _ctx.at(output_index).type(), _ctx.at(output_index).scale(),
+                                       _ctx.at(output_index).zeroPoint()));
+  _builder.addShapeConstr(input_index,
+                          asTensorInfo(asTensorShape(_ctx.at(input_index).shape()),
+                                       _ctx.at(input_index).type(), _ctx.at(input_index).scale(),
+                                       _ctx.at(input_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -1504,8 +1504,10 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::Implicit::Node &nod
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape, _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape, _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
 
   // Construct operation parameters
   struct Param
@@ -1617,8 +1619,10 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::Explicit::Node &nod
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape, _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape, _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
 
   // Construct operation parameters
   struct Param
@@ -1728,8 +1732,10 @@ void Planner::visit(const ::internal::tflite::op::AvgPool2D::Implicit::Node &nod
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape, _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape, _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
 
   // Construct operation parameters
   struct Param
@@ -1843,8 +1849,10 @@ void Planner::visit(const ::internal::tflite::op::AvgPool2D::Explicit::Node &nod
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape, _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape, _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
 
   // Construct operation parameters
   struct Param
@@ -1940,7 +1948,8 @@ void Planner::visit(const ::internal::tflite::op::Concat::Node &node)
   }
 
   // Set Shape Constraints and TensorInfo (for output)
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape, _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
 
   // Set Shape Constraints and TensorInfo (for input)
   const uint32_t coord_index = ToARMComputeAxis(input_rank, axis).value();
@@ -1956,8 +1965,8 @@ void Planner::visit(const ::internal::tflite::op::Concat::Node &node)
 
     coordinates[coord_index] = depth;
 
-    _builder.addSubsumptionConstr(ifm_index, ofm_index, coordinates, asTensorShape(ifm_shape),
-                                  true);
+    _builder.addSubsumptionConstr(ifm_index, ofm_index, coordinates,
+                                  asTensorShape(_ctx.at(ifm_index).shape()), true);
 
     depth += ifm_shape.dim(axis);
   }
@@ -1998,7 +2007,7 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
 
   // Check for reshaping input's shape into rank-2
   bool needs_reshape = false;
-  nnfw::util::matrix::Shape reshape;
+  internal::tflite::operand::Shape reshape(2);
   if (input_rank == 4)
   {
     nnfw::util::feature::Shape ifm_shape_feature = _ctx.at(input_index).shape().asFeature();
@@ -2006,14 +2015,15 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
         ifm_shape_feature.N * ifm_shape_feature.C * ifm_shape_feature.H * ifm_shape_feature.W;
     assert(feature_size == batch_size * input_size);
 
-    _builder.addShapeConstr(
-        input_index, asTensorInfo(ifm_shape_feature, _ctx.at(input_index).type(),
-                                  _ctx.at(input_index).scale(), _ctx.at(input_index).zeroPoint()));
+    _builder.addShapeConstr(input_index,
+                            asTensorInfo(asTensorShape(_ctx.at(input_index).shape()),
+                                         _ctx.at(input_index).type(), _ctx.at(input_index).scale(),
+                                         _ctx.at(input_index).zeroPoint()));
 
     // for reshaping
     needs_reshape = true;
-    reshape.H = batch_size;
-    reshape.W = input_size;
+    reshape.dim(0) = batch_size; /* H */
+    reshape.dim(1) = input_size; /* W */
   }
   else if (input_rank == 2)
   {
@@ -2022,22 +2032,26 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
     assert(ifm_shape.dim(0) == batch_size);
     assert(ifm_shape.dim(1) == input_size);
 
-    _builder.addShapeConstr(input_index, asTensorInfo(ifm_shape_matrix, _ctx.at(input_index).type(),
-                                                      _ctx.at(input_index).scale(),
-                                                      _ctx.at(input_index).zeroPoint()));
+    _builder.addShapeConstr(input_index,
+                            asTensorInfo(asTensorShape(_ctx.at(input_index).shape()),
+                                         _ctx.at(input_index).type(), _ctx.at(input_index).scale(),
+                                         _ctx.at(input_index).zeroPoint()));
   }
 
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints
-  _builder.addShapeConstr(
-      output_index, asTensorInfo(_ctx.at(output_index).shape(), _ctx.at(output_index).type(),
-                                 _ctx.at(output_index).scale(), _ctx.at(output_index).zeroPoint()));
-  _builder.addShapeConstr(
-      weight_index, asTensorInfo(_ctx.at(weight_index).shape(), _ctx.at(weight_index).type(),
-                                 _ctx.at(weight_index).scale(), _ctx.at(weight_index).zeroPoint()));
-  _builder.addShapeConstr(
-      bias_index, asTensorInfo(_ctx.at(bias_index).shape(), _ctx.at(bias_index).type(),
-                               _ctx.at(bias_index).scale(), _ctx.at(bias_index).zeroPoint()));
+  _builder.addShapeConstr(output_index,
+                          asTensorInfo(asTensorShape(_ctx.at(output_index).shape()),
+                                       _ctx.at(output_index).type(), _ctx.at(output_index).scale(),
+                                       _ctx.at(output_index).zeroPoint()));
+  _builder.addShapeConstr(weight_index,
+                          asTensorInfo(asTensorShape(_ctx.at(weight_index).shape()),
+                                       _ctx.at(weight_index).type(), _ctx.at(weight_index).scale(),
+                                       _ctx.at(weight_index).zeroPoint()));
+  _builder.addShapeConstr(bias_index,
+                          asTensorInfo(asTensorShape(_ctx.at(bias_index).shape()),
+                                       _ctx.at(bias_index).type(), _ctx.at(bias_index).scale(),
+                                       _ctx.at(bias_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -2088,13 +2102,12 @@ void Planner::visit(const ::internal::tflite::op::ResizeBilinear::Node &node)
   const ::internal::tflite::operand::Index height_index{node.param().height_index};
   const ::internal::tflite::operand::Index width_index{node.param().width_index};
 
-  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
-  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
-
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape, _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape, _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
 
   struct Param
   {
@@ -2147,10 +2160,10 @@ void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
   // assert((ifm_shape.C * ifm_shape.H * ifm_shape.W) == out_size);
 
   // TODO Should move to the place where the operand is handled, if it is possible.
-  _builder.addShapeConstr(
-      output_index, asTensorInfo(_ctx.at(output_index).shape(), _ctx.at(output_index).type()));
-  _builder.addShapeConstr(input_index,
-                          asTensorInfo(_ctx.at(input_index).shape(), _ctx.at(input_index).type()));
+  _builder.addShapeConstr(output_index, asTensorInfo(asTensorShape(_ctx.at(output_index).shape()),
+                                                     _ctx.at(output_index).type()));
+  _builder.addShapeConstr(input_index, asTensorInfo(asTensorShape(_ctx.at(input_index).shape()),
+                                                    _ctx.at(input_index).type()));
 
   struct Param
   {
@@ -2197,12 +2210,12 @@ void Planner::visit(const ::internal::tflite::op::Squeeze::Node &node)
   //   - 4D input fails (squeeze.mod.py) -> we need general tensor support
 
   // TODO Support generic tensor shape
-  const auto output_shape = _ctx.at(output_index).shape();
-  const auto input_shape = _ctx.at(input_index).shape();
 
   // Set Shape Constraints
-  _builder.addShapeConstr(output_index, asTensorInfo(output_shape, _ctx.at(output_index).type()));
-  _builder.addShapeConstr(input_index, asTensorInfo(input_shape, _ctx.at(input_index).type()));
+  _builder.addShapeConstr(output_index, asTensorInfo(asTensorShape(_ctx.at(output_index).shape()),
+                                                     _ctx.at(output_index).type()));
+  _builder.addShapeConstr(input_index, asTensorInfo(asTensorShape(_ctx.at(input_index).shape()),
+                                                    _ctx.at(input_index).type()));
 
   // Construct operation parameters
   struct Param
@@ -2246,12 +2259,14 @@ void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
   assert(_ctx.at(scale_index).shape().rank() == 0);
 
   // TODO Should move to the place where the operand is handled, if it is possible.
-  _builder.addShapeConstr(
-      output_index, asTensorInfo(_ctx.at(output_index).shape(), _ctx.at(output_index).type(),
-                                 _ctx.at(output_index).scale(), _ctx.at(output_index).zeroPoint()));
-  _builder.addShapeConstr(
-      input_index, asTensorInfo(_ctx.at(input_index).shape(), _ctx.at(input_index).type(),
-                                _ctx.at(input_index).scale(), _ctx.at(input_index).zeroPoint()));
+  _builder.addShapeConstr(output_index,
+                          asTensorInfo(asTensorShape(_ctx.at(output_index).shape()),
+                                       _ctx.at(output_index).type(), _ctx.at(output_index).scale(),
+                                       _ctx.at(output_index).zeroPoint()));
+  _builder.addShapeConstr(input_index,
+                          asTensorInfo(asTensorShape(_ctx.at(input_index).shape()),
+                                       _ctx.at(input_index).type(), _ctx.at(input_index).scale(),
+                                       _ctx.at(input_index).zeroPoint()));
 
   struct Param
   {
@@ -2306,29 +2321,36 @@ void Planner::visit(const ::internal::tflite::op::StridedSlice::Node &node)
   const ::internal::tflite::operand::Index shrinkAxisMask_index{node.param().shrinkAxisMask_index};
 
   // Set Shape Constraints
-  _builder.addShapeConstr(outputData_index, asTensorInfo(_ctx.at(outputData_index).shape(),
-                                                         _ctx.at(outputData_index).type(),
-                                                         _ctx.at(outputData_index).scale(),
-                                                         _ctx.at(outputData_index).zeroPoint()));
-  _builder.addShapeConstr(inputData_index, asTensorInfo(_ctx.at(inputData_index).shape(),
-                                                        _ctx.at(inputData_index).type(),
-                                                        _ctx.at(inputData_index).scale(),
-                                                        _ctx.at(inputData_index).zeroPoint()));
-
-  const auto startData_size = _ctx.at(startData_index).shape().asVector();
-  const auto endData_size = _ctx.at(endData_index).shape().asVector();
-  const auto stridesData_size = _ctx.at(stridesData_index).shape().asVector();
+  _builder.addShapeConstr(outputData_index,
+                          asTensorInfo(asTensorShape(_ctx.at(outputData_index).shape()),
+                                       _ctx.at(outputData_index).type(),
+                                       _ctx.at(outputData_index).scale(),
+                                       _ctx.at(outputData_index).zeroPoint()));
+  _builder.addShapeConstr(
+      inputData_index,
+      asTensorInfo(asTensorShape(_ctx.at(inputData_index).shape()), _ctx.at(inputData_index).type(),
+                   _ctx.at(inputData_index).scale(), _ctx.at(inputData_index).zeroPoint()));
+
+  assert(_ctx.at(startData_index).shape().rank() == 1);
+  assert(_ctx.at(endData_index).shape().rank() == 1);
+  assert(_ctx.at(stridesData_index).shape().rank() == 1);
   _builder.addShapeConstr(startData_index,
-                          asTensorInfo(startData_size, _ctx.at(startData_index).type()));
-  _builder.addShapeConstr(endData_index, asTensorInfo(endData_size, _ctx.at(endData_index).type()));
+                          asTensorInfo(asTensorShape(_ctx.at(startData_index).shape()),
+                                       _ctx.at(startData_index).type()));
+  _builder.addShapeConstr(endData_index, asTensorInfo(asTensorShape(_ctx.at(endData_index).shape()),
+                                                      _ctx.at(endData_index).type()));
   _builder.addShapeConstr(stridesData_index,
-                          asTensorInfo(stridesData_size, _ctx.at(stridesData_index).type()));
+                          asTensorInfo(asTensorShape(_ctx.at(stridesData_index).shape()),
+                                       _ctx.at(stridesData_index).type()));
 
   // Set initializers for indices data such as order of inputData
   {
     auto startData_base = _ctx.at(startData_index).data().base();
     auto endData_base = _ctx.at(endData_index).data().base();
     auto stridesData_base = _ctx.at(stridesData_index).data().base();
+    const auto startData_size = _ctx.at(startData_index).shape().asVector();
+    const auto endData_size = _ctx.at(endData_index).shape().asVector();
+    const auto stridesData_size = _ctx.at(stridesData_index).shape().asVector();
 
     assert(_ctx.at(startData_index).type() == ANEURALNETWORKS_TENSOR_INT32);
     auto startData_initializer =
@@ -2419,9 +2441,10 @@ void Planner::visit(const ::internal::tflite::op::ReduceMax::Node &node)
   assert(_ctx.at(axis_index).hasData());
   assert(axis_shape.rank() == 0 || ((axis_shape.rank() == 1) && (axis_shape.dim(0) == 1)));
 
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape.dim(0), _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(
-      ifm_index, asTensorInfo(ifm_shape.dim(0), ifm_shape.dim(1), _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
 
   // Note: Assume only one element in axis. It is checked by assertion above
   // TODO: handle general case
@@ -2468,17 +2491,16 @@ void Planner::visit(const ::internal::tflite::op::Cast::Node &node)
   const ::internal::tflite::operand::Index output_index{node.param().output_index};
   const ::internal::tflite::operand::Index input_index{node.param().input_index};
 
-  const auto output_shape = _ctx.at(output_index).shape().asTensor();
-  const auto input_shape = _ctx.at(input_index).shape().asTensor();
+  assert(_ctx.at(output_index).shape() == _ctx.at(input_index).shape());
 
-  assert(output_shape == input_shape);
-
-  _builder.addShapeConstr(output_index, asTensorInfo(input_shape, _ctx.at(output_index).type(),
-                                                     _ctx.at(output_index).scale(),
-                                                     _ctx.at(output_index).zeroPoint()));
-  _builder.addShapeConstr(input_index, asTensorInfo(output_shape, _ctx.at(input_index).type(),
-                                                    _ctx.at(input_index).scale(),
-                                                    _ctx.at(input_index).zeroPoint()));
+  _builder.addShapeConstr(output_index,
+                          asTensorInfo(asTensorShape(_ctx.at(output_index).shape()),
+                                       _ctx.at(output_index).type(), _ctx.at(output_index).scale(),
+                                       _ctx.at(output_index).zeroPoint()));
+  _builder.addShapeConstr(input_index,
+                          asTensorInfo(asTensorShape(_ctx.at(input_index).shape()),
+                                       _ctx.at(input_index).type(), _ctx.at(input_index).scale(),
+                                       _ctx.at(input_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -2537,18 +2559,18 @@ void Planner::visit(const ::internal::tflite::op::TopKV2::Node &node)
   assert(_ctx.at(inputData_index).shape().rank() == 1 ||
          _ctx.at(inputData_index).shape().rank() == 2);
 
-  const auto outputValues_shape = _ctx.at(outputValues_index).shape().asTensor();
-  const auto outputIndices_shape = _ctx.at(outputIndices_index).shape().asTensor();
-  const auto inputData_shape = _ctx.at(inputData_index).shape().asTensor();
   const int32_t k = _ctx.at(k_index).asScalar<int32_t>();
 
   // Set shape constraints
   _builder.addShapeConstr(outputValues_index,
-                          asTensorInfo(outputValues_shape, _ctx.at(outputValues_index).type()));
+                          asTensorInfo(asTensorShape(_ctx.at(outputValues_index).shape()),
+                                       _ctx.at(outputValues_index).type()));
   _builder.addShapeConstr(outputIndices_index,
-                          asTensorInfo(outputIndices_shape, _ctx.at(outputIndices_index).type()));
+                          asTensorInfo(asTensorShape(_ctx.at(outputIndices_index).shape()),
+                                       _ctx.at(outputIndices_index).type()));
   _builder.addShapeConstr(inputData_index,
-                          asTensorInfo(inputData_shape, _ctx.at(inputData_index).type()));
+                          asTensorInfo(asTensorShape(_ctx.at(inputData_index).shape()),
+                                       _ctx.at(inputData_index).type()));
 
   // Construct operation parameters
   struct Param
@@ -2598,16 +2620,15 @@ void Planner::visit(const ::internal::tflite::op::Gather::Node &node)
 
   // Currently, 1D-input and 2D-input are supported.
   assert(_ctx.at(lhs_index).shape().rank() == 1 || _ctx.at(lhs_index).shape().rank() == 2);
-
-  // TODO Support generic tensor shape
-  const auto ofm_shape = _ctx.at(ofm_index).shape();
-  const auto lhs_shape = _ctx.at(lhs_index).shape();
-  const auto rhs_shape = _ctx.at(rhs_index).shape().asVector();
+  assert(_ctx.at(rhs_index).shape().rank() == 1);
 
   // Set Shape Constraints
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape, _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(lhs_index, asTensorInfo(lhs_shape, _ctx.at(lhs_index).type()));
-  _builder.addShapeConstr(rhs_index, asTensorInfo(rhs_shape, _ctx.at(rhs_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(lhs_index, asTensorInfo(asTensorShape(_ctx.at(lhs_index).shape()),
+                                                  _ctx.at(lhs_index).type()));
+  _builder.addShapeConstr(rhs_index, asTensorInfo(asTensorShape(_ctx.at(rhs_index).shape()),
+                                                  _ctx.at(rhs_index).type()));
 
   // Construct operation parameters
   struct Param
@@ -2656,12 +2677,12 @@ void Planner::visit(const ::internal::tflite::op::ReLU::Node &node)
   const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
 
   // Set shape constraints
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(_ctx.at(ifm_index).shape(), _ctx.at(ifm_index).type(),
-                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()), _ctx.at(ifm_index).type(),
+                              _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
 
   struct Param
   {
@@ -2710,12 +2731,12 @@ void Planner::visit(const ::internal::tflite::op::ReLU1::Node &node)
   const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
 
   // Set shape constraints
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(_ctx.at(ifm_index).shape(), _ctx.at(ifm_index).type(),
-                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()), _ctx.at(ifm_index).type(),
+                              _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
 
   struct Param
   {
@@ -2764,12 +2785,12 @@ void Planner::visit(const ::internal::tflite::op::ReLU6::Node &node)
   const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
 
   // Set shape constraints
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(_ctx.at(ifm_index).shape(), _ctx.at(ifm_index).type(),
-                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()), _ctx.at(ifm_index).type(),
+                              _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
 
   struct Param
   {
@@ -2818,10 +2839,10 @@ void Planner::visit(const ::internal::tflite::op::Tanh::Node &node)
   const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
 
   // Set shape constraints
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(_ctx.at(ifm_index).shape(), _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
 
   struct Param
   {
@@ -2864,12 +2885,12 @@ void Planner::visit(const ::internal::tflite::op::Logistic::Node &node)
   const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
 
   // Set shape constraints
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(_ctx.at(ifm_index).shape(), _ctx.at(ifm_index).type(),
-                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()), _ctx.at(ifm_index).type(),
+                              _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
 
   struct Param
   {
@@ -2916,12 +2937,12 @@ void Planner::visit(const ::internal::tflite::op::Mean::Node &node)
   const int keep_dims = _ctx.at(keep_dims_index).asScalar<int>();
 
   // Set shape constraints
-  _builder.addShapeConstr(
-      ofm_index, asTensorInfo(_ctx.at(ofm_index).shape().asTensor(), _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(
-      ifm_index, asTensorInfo(_ctx.at(ifm_index).shape().asTensor(), _ctx.at(ifm_index).type()));
-  _builder.addShapeConstr(
-      axis_index, asTensorInfo(_ctx.at(axis_index).shape().asVector(), _ctx.at(axis_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(axis_index, asTensorInfo(asTensorShape(_ctx.at(axis_index).shape()),
+                                                   _ctx.at(axis_index).type()));
 
   // TODO keep_dims==0
   assert(keep_dims != 0);
@@ -2938,7 +2959,7 @@ void Planner::visit(const ::internal::tflite::op::Mean::Node &node)
     const auto axis_size = _ctx.at(axis_index).shape().asVector();
 
     // NHWC type -> WHCN type
-    if (_ctx.at(ofm_index).shape().asTensor().rank() == 4)
+    if (_ctx.at(ofm_index).shape().rank() == 4)
     {
       for (uint32_t n = 0; n < axis_size; ++n)
       {
@@ -3035,23 +3056,23 @@ void Planner::visit(const ::internal::tflite::op::RNN::Node &node)
          num_units == _ctx.at(hidden_state_out_index).shape().dim(1));
 
   // Set Shape Constraints and TensorInfo
-  _builder.addShapeConstr(
-      output_index, asTensorInfo(_ctx.at(output_index).shape(), _ctx.at(output_index).type()));
+  _builder.addShapeConstr(output_index, asTensorInfo(asTensorShape(_ctx.at(output_index).shape()),
+                                                     _ctx.at(output_index).type()));
   _builder.addShapeConstr(hidden_state_out_index,
-                          asTensorInfo(_ctx.at(hidden_state_out_index).shape(),
+                          asTensorInfo(asTensorShape(_ctx.at(hidden_state_out_index).shape()),
                                        _ctx.at(hidden_state_out_index).type()));
-  _builder.addShapeConstr(input_index,
-                          asTensorInfo(_ctx.at(input_index).shape(), _ctx.at(input_index).type()));
-  _builder.addShapeConstr(
-      weights_index, asTensorInfo(_ctx.at(weights_index).shape(), _ctx.at(weights_index).type()));
+  _builder.addShapeConstr(input_index, asTensorInfo(asTensorShape(_ctx.at(input_index).shape()),
+                                                    _ctx.at(input_index).type()));
+  _builder.addShapeConstr(weights_index, asTensorInfo(asTensorShape(_ctx.at(weights_index).shape()),
+                                                      _ctx.at(weights_index).type()));
   _builder.addShapeConstr(recurrent_weights_index,
-                          asTensorInfo(_ctx.at(recurrent_weights_index).shape(),
+                          asTensorInfo(asTensorShape(_ctx.at(recurrent_weights_index).shape()),
                                        _ctx.at(recurrent_weights_index).type()));
-  _builder.addShapeConstr(bias_index,
-                          asTensorInfo(_ctx.at(bias_index).shape(), _ctx.at(bias_index).type()));
-  _builder.addShapeConstr(
-      hidden_state_in_index,
-      asTensorInfo(_ctx.at(hidden_state_in_index).shape(), _ctx.at(hidden_state_in_index).type()));
+  _builder.addShapeConstr(bias_index, asTensorInfo(asTensorShape(_ctx.at(bias_index).shape()),
+                                                   _ctx.at(bias_index).type()));
+  _builder.addShapeConstr(hidden_state_in_index,
+                          asTensorInfo(asTensorShape(_ctx.at(hidden_state_in_index).shape()),
+                                       _ctx.at(hidden_state_in_index).type()));
 
   // Construct operation parameters
   struct Param
@@ -3134,12 +3155,12 @@ void Planner::visit(const ::internal::tflite::op::Transpose::Node &node)
   const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
 
   // Set shape constraints
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
-                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(_ctx.at(ifm_index).shape(), _ctx.at(ifm_index).type(),
-                                       _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()), _ctx.at(ofm_index).type(),
+                              _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
+  _builder.addShapeConstr(
+      ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()), _ctx.at(ifm_index).type(),
+                              _ctx.at(ifm_index).scale(), _ctx.at(ifm_index).zeroPoint()));
   // NNAPI spec provides permutation vector for generic transpose
   // TODO Make the permutation vector a part of Param
   struct Param
@@ -3177,10 +3198,10 @@ void Planner::visit(const ::internal::tflite::op::Floor::Node &node)
   const ::internal::tflite::operand::Index ifm_index{node.param().input_index};
 
   // Set shape constraints
-  _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(ifm_index,
-                          asTensorInfo(_ctx.at(ifm_index).shape(), _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
 
   struct Param
   {
@@ -3241,7 +3262,8 @@ void Planner::visit(const ::internal::tflite::op::Split::Node &node)
   const int32_t slice_size = input_size / num_split;
 
   // Set Shape Constraints and TensorInfo (for input)
-  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape, _ctx.at(ifm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
 
   // Set Shape Constraints and TensorInfo (for output)
   const auto rank = ifm_shape.rank();
@@ -3254,12 +3276,11 @@ void Planner::visit(const ::internal::tflite::op::Split::Node &node)
   for (const auto &index : node.param().ofm_indexes)
   {
     const ::internal::tflite::operand::Index ofm_index{index};
-    const auto ofm_shape = _ctx.at(ofm_index).shape();
 
     coordinates[coord_index] = depth;
 
-    _builder.addSubsumptionConstr(ofm_index, ifm_index, coordinates, asTensorShape(ofm_shape),
-                                  true);
+    _builder.addSubsumptionConstr(ofm_index, ifm_index, coordinates,
+                                  asTensorShape(_ctx.at(ofm_index).shape()), true);
     depth += slice_size;
   }
 
index ae5e39f..470e76c 100644
--- a/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.h
+++ b/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.h
@@ -5,55 +5,6 @@
 #include "internal/Swizzle.h"
 #include "internal/Model.h"
 
-::arm_compute::TensorShape asTensorShape(int32_t h, int32_t w)
-{
-  return ::arm_compute::TensorShape(w, h);
-}
-
-::arm_compute::TensorShape asTensorShape(const nnfw::util::feature::Shape &shape)
-{
-  return ::arm_compute::TensorShape(shape.W, shape.H, shape.C, shape.N);
-}
-
-::arm_compute::TensorShape asTensorShape(const nnfw::util::kernel::Shape &shape)
-{
-  return ::arm_compute::TensorShape(shape.W, shape.H, shape.C, shape.N);
-}
-
-::arm_compute::TensorShape asTensorShape(const nnfw::util::matrix::Shape &shape)
-{
-  return ::arm_compute::TensorShape(shape.W, shape.H);
-}
-
-::arm_compute::TensorShape asTensorShape(const nnfw::util::tensor::Shape &shape)
-{
-  if (shape.rank() == 0)
-  {
-    return ::arm_compute::TensorShape(1);
-  }
-  else if (shape.rank() == 1)
-  {
-    return ::arm_compute::TensorShape(shape.dim(0));
-  }
-  else if (shape.rank() == 2)
-  {
-    return ::arm_compute::TensorShape(shape.dim(1), shape.dim(0)); // W H
-  }
-  else if (shape.rank() == 3)
-  {
-    return ::arm_compute::TensorShape(shape.dim(2), shape.dim(1), shape.dim(0));
-  }
-  else if (shape.rank() == 4)
-  {
-    return ::arm_compute::TensorShape(shape.dim(2), shape.dim(1), shape.dim(3),
-                                      shape.dim(0)); // W H C N
-  }
-  else
-  {
-    throw std::runtime_error("Not supported, yet");
-  }
-}
-
 inline ::arm_compute::TensorShape asTensorShape(const internal::tflite::operand::Shape &shape)
 {
   const uint32_t rank = shape.rank();
@@ -119,55 +70,10 @@ inline ::arm_compute::TensorShape asTensorShape(const internal::tflite::operand:
   return ::arm_compute::QuantizationInfo(scale, offset);
 }
 
-::arm_compute::TensorInfo asTensorInfo(const nnfw::util::feature::Shape &shape, const int32_t type,
+::arm_compute::TensorInfo asTensorInfo(const ::arm_compute::TensorShape &shape, const int32_t type,
                                        const float scale = 0.0f, const int32_t zeroPoint = 0)
 {
-  return ::arm_compute::TensorInfo(asTensorShape(shape), 1, asDataType(type),
-                                   asQuantizationInfo(scale, zeroPoint));
-}
-
-::arm_compute::TensorInfo asTensorInfo(const nnfw::util::kernel::Shape &shape, const int32_t type,
-                                       const float scale = 0.0f, const int32_t zeroPoint = 0)
-{
-  return ::arm_compute::TensorInfo(asTensorShape(shape), 1, asDataType(type),
-                                   asQuantizationInfo(scale, zeroPoint));
-}
-
-// NOTE : internal::tflite::operand::Shape inherits nnfw::util::tensor::Shape
-// If you use internal::tflite::operand::Shape, double check if calling this method is correct
-::arm_compute::TensorInfo asTensorInfo(const nnfw::util::tensor::Shape &shape, const int32_t type,
-                                       const float scale = 0.0f, const int32_t zeroPoint = 0)
-{
-  return ::arm_compute::TensorInfo(asTensorShape(shape), 1, asDataType(type),
-                                   asQuantizationInfo(scale, zeroPoint));
-}
-
-::arm_compute::TensorInfo asTensorInfo(const nnfw::util::matrix::Shape &shape, const int32_t type,
-                                       const float scale = 0.0f, const int32_t zeroPoint = 0)
-{
-  return ::arm_compute::TensorInfo(asTensorShape(shape), 1, asDataType(type),
-                                   asQuantizationInfo(scale, zeroPoint));
-}
-
-::arm_compute::TensorInfo asTensorInfo(int32_t size, const int32_t type, const float scale = 0.0f,
-                                       const int32_t zeroPoint = 0)
-{
-  return ::arm_compute::TensorInfo(::arm_compute::TensorShape(size), 1, asDataType(type),
-                                   asQuantizationInfo(scale, zeroPoint));
-}
-
-::arm_compute::TensorInfo asTensorInfo(int32_t h, int32_t w, const int32_t type,
-                                       const float scale = 0.0f, const int32_t zeroPoint = 0)
-{
-  return ::arm_compute::TensorInfo(::arm_compute::TensorShape(w, h), 1, asDataType(type),
-                                   asQuantizationInfo(scale, zeroPoint));
-}
-
-::arm_compute::TensorInfo asTensorInfo(const internal::tflite::operand::Shape &shape,
-                                       const int32_t type, const float scale = 0.0f,
-                                       const int32_t zeroPoint = 0)
-{
-  return ::arm_compute::TensorInfo(asTensorShape(shape), 1, asDataType(type),
+  return ::arm_compute::TensorInfo(shape, 1, asDataType(type),
                                    asQuantizationInfo(scale, zeroPoint));
 }