Support the broadcast of all cases (#2511)
author Jiseob Jang / Motion Control Lab (SR) / Engineer / Samsung Electronics <jiseob.jang@samsung.com>
Wed, 29 Aug 2018 07:37:42 +0000 (16:37 +0900)
committer Saehie Park / Motion Control Lab (SR) / Principal Engineer / Samsung Electronics <saehie.park@samsung.com>
Wed, 29 Aug 2018 07:37:42 +0000 (16:37 +0900)
* Support the broadcast of all cases

This commit supports broadcasting in all cases.
- Add extending the rank of an operand (each added dimension is filled with 1).
- Apply broadcasting to the arithmetic operations (Add, Sub, Mul, Div); a minimal sketch of this pattern follows below.

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
* Remove unnecessary residual code related to broadcasting

This commit removes unnecessary residual code that was only needed by the old broadcast path.

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
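
The preparation step this commit inserts before addShapeConstr() in Planner::visit for Add, Sub, Mul, and Div is small enough to sketch in isolation. Below is a minimal, self-contained illustration of the idea, using a simplified stand-in Shape type rather than the real ::internal::tflite::operand::Shape; the helper name prepareBroadcast is hypothetical and exists only for this sketch.

#include <algorithm>
#include <cstddef>
#include <deque>

// Simplified stand-in for ::internal::tflite::operand::Shape, for illustration only.
struct Shape
{
  std::deque<int> dims;

  size_t rank() const { return dims.size(); }

  // Mirrors Shape::extendRank() from Model.cc: each newly added leading dimension is 1.
  void extendRank(size_t to_rank)
  {
    while (rank() < to_rank)
      dims.push_front(1);
  }

  bool operator==(const Shape &other) const { return dims == other.dims; }
};

// The per-operation preparation step: when the two inputs differ, extend both
// to the larger rank so the backend receives broadcast-compatible tensor shapes.
void prepareBroadcast(Shape &lhs, Shape &rhs) // hypothetical helper name
{
  if (!(lhs == rhs))
  {
    const auto broadcast_rank = std::max(lhs.rank(), rhs.rank());
    lhs.extendRank(broadcast_rank);
    rhs.extendRank(broadcast_rank);
  }
}

int main()
{
  Shape lhs{{3}};          // rank-1 operand
  Shape rhs{{2, 4, 5, 3}}; // rank-4 operand
  prepareBroadcast(lhs, rhs);
  // lhs is now {1, 1, 1, 3}; rhs keeps its original rank-4 shape.
  return 0;
}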
runtimes/pure_arm_compute/src/compilation.cc
runtimes/pure_arm_compute/src/compilation.h
runtimes/pure_arm_compute/src/execution.cc
runtimes/pure_arm_compute/src/execution.h
runtimes/pure_arm_compute/src/internal/Model.cc
runtimes/pure_arm_compute/src/internal/Model.h
runtimes/pure_arm_compute/src/internal/arm_compute/Cast.h

index f597fdc..5d7b521 100644 (file)
@@ -279,12 +279,6 @@ struct IPlanBuilder
 
   virtual void addShapeConstr(const ::internal::tflite::operand::Index &ind,
                               const ::arm_compute::TensorInfo &info) = 0;
-  virtual void addShapeConstr(const ::internal::tflite::operand::Index &lhs_ind,
-                              const ::internal::tflite::operand::Object &lhs_obj,
-                              const nnfw::util::tensor::Shape &lhs_shape,
-                              const ::internal::tflite::operand::Index &rhs_ind,
-                              const ::internal::tflite::operand::Object &rhs_obj,
-                              const nnfw::util::tensor::Shape &rhs_shape) = 0;
   virtual void addSubsumptionConstr(const ::internal::tflite::operand::Index &ind,
                                     const ::internal::tflite::operand::Index &base,
                                     const ::arm_compute::Coordinates &offset,
@@ -495,17 +489,27 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
   const ::internal::tflite::operand::Index rhs_index{node.param().rhs_index};
   const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
 
-  const auto ofm_shape = _ctx.at(ofm_index).shape().asTensor();
-  const auto lhs_shape = _ctx.at(lhs_index).shape().asTensor();
-  const auto rhs_shape = _ctx.at(rhs_index).shape().asTensor();
-
   // TODO Should move to the place where the operand is handled, if it is possible.
   // Set Shape Constraints and TensorInfo
   _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(ofm_shape, _ctx.at(ofm_index).type(),
+                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
                                        _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(lhs_index, _ctx.at(lhs_index), lhs_shape, rhs_index, _ctx.at(rhs_index),
-                          rhs_shape);
+
+  if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+  {
+    const auto broadcast_rank =
+        std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(lhs_index).shape())
+        .extendRank(broadcast_rank);
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(rhs_index).shape())
+        .extendRank(broadcast_rank);
+  }
+  _builder.addShapeConstr(lhs_index,
+                          asTensorInfo(_ctx.at(lhs_index).shape(), _ctx.at(lhs_index).type(),
+                                       _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
+  _builder.addShapeConstr(rhs_index,
+                          asTensorInfo(_ctx.at(rhs_index).shape(), _ctx.at(rhs_index).type(),
+                                       _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -525,6 +529,8 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
+  const auto lhs_shape = _ctx.at(lhs_index).shape();
+  const auto rhs_shape = _ctx.at(rhs_index).shape();
   auto stage = [param, lhs_shape, rhs_shape](const IAllocationContext &ctx,
                                              IExecutionBuilder &builder) {
     auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
@@ -583,13 +589,26 @@ void Planner::visit(const ::internal::tflite::op::Sub::Node &node)
   const ::internal::tflite::operand::Index rhs_index{node.param().rhs_index};
   const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
 
-  const auto ofm_shape = _ctx.at(ofm_index).shape().asTensor();
-  const auto lhs_shape = _ctx.at(lhs_index).shape().asTensor();
-  const auto rhs_shape = _ctx.at(rhs_index).shape().asTensor();
+  // Set Shape Constraints and TensorInfo
+  _builder.addShapeConstr(ofm_index,
+                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
+                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
 
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape, _ctx.at(ofm_index).type()));
-  _builder.addShapeConstr(lhs_index, _ctx.at(lhs_index), lhs_shape, rhs_index, _ctx.at(rhs_index),
-                          rhs_shape);
+  if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+  {
+    const auto broadcast_rank =
+        std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(lhs_index).shape())
+        .extendRank(broadcast_rank);
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(rhs_index).shape())
+        .extendRank(broadcast_rank);
+  }
+  _builder.addShapeConstr(lhs_index,
+                          asTensorInfo(_ctx.at(lhs_index).shape(), _ctx.at(lhs_index).type(),
+                                       _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
+  _builder.addShapeConstr(rhs_index,
+                          asTensorInfo(_ctx.at(rhs_index).shape(), _ctx.at(rhs_index).type(),
+                                       _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -648,15 +667,30 @@ void Planner::visit(const ::internal::tflite::op::Mul::Node &node)
   const ::internal::tflite::operand::Index rhs_index{node.param().rhs_index};
   const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
 
-  const auto ofm_shape = _ctx.at(ofm_index).shape().asTensor();
-  const auto lhs_shape = _ctx.at(lhs_index).shape().asTensor();
-  const auto rhs_shape = _ctx.at(rhs_index).shape().asTensor();
-
+  if (_ctx.at(ofm_index).scale() > 0)
+  {
+    assert(_ctx.at(ofm_index).scale() > _ctx.at(lhs_index).scale() * _ctx.at(rhs_index).scale());
+  }
+  // Set Shape Constraints and TensorInfo
   _builder.addShapeConstr(ofm_index,
-                          asTensorInfo(ofm_shape, _ctx.at(ofm_index).type(),
+                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
                                        _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
-  _builder.addShapeConstr(lhs_index, _ctx.at(lhs_index), lhs_shape, rhs_index, _ctx.at(rhs_index),
-                          rhs_shape);
+
+  if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+  {
+    const auto broadcast_rank =
+        std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(lhs_index).shape())
+        .extendRank(broadcast_rank);
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(rhs_index).shape())
+        .extendRank(broadcast_rank);
+  }
+  _builder.addShapeConstr(lhs_index,
+                          asTensorInfo(_ctx.at(lhs_index).shape(), _ctx.at(lhs_index).type(),
+                                       _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
+  _builder.addShapeConstr(rhs_index,
+                          asTensorInfo(_ctx.at(rhs_index).shape(), _ctx.at(rhs_index).type(),
+                                       _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
 
   struct Param
   {
@@ -718,35 +752,27 @@ void Planner::visit(const ::internal::tflite::op::Div::Node &node)
 
   const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
 
-  // TODO Should move to the place where the operand is handled, if it is possible.
-  // TODO Support general broadcasting. Currently, broadcast works only when one operand is scalar
-  //      or the operand's dimension size is one.
-  const auto ofm_shape = _ctx.at(ofm_index).shape();
-  const auto ofm_shape_rank = ofm_shape.rank();
-  if (ofm_shape_rank > 4)
-  {
-    throw std::runtime_error("Not supported, yet");
-  }
-
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape.asTensor(), _ctx.at(ofm_index).type()));
-
-  const auto lhs_shape = _ctx.at(lhs_index).shape();
-  const auto lhs_shape_rank = lhs_shape.rank();
-  if (lhs_shape_rank > 4)
-  {
-    throw std::runtime_error("Not supported, yet");
-  }
-
-  _builder.addShapeConstr(lhs_index, asTensorInfo(lhs_shape.asTensor(), _ctx.at(lhs_index).type()));
+  // Set Shape Constraints and TensorInfo
+  _builder.addShapeConstr(ofm_index,
+                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type(),
+                                       _ctx.at(ofm_index).scale(), _ctx.at(ofm_index).zeroPoint()));
 
-  const auto rhs_shape = _ctx.at(rhs_index).shape();
-  const auto rhs_shape_rank = rhs_shape.rank();
-  if (rhs_shape_rank > 4)
+  if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
-    throw std::runtime_error("Not supported, yet");
+    const auto broadcast_rank =
+        std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(lhs_index).shape())
+        .extendRank(broadcast_rank);
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(rhs_index).shape())
+        .extendRank(broadcast_rank);
   }
 
-  _builder.addShapeConstr(rhs_index, asTensorInfo(rhs_shape.asTensor(), _ctx.at(rhs_index).type()));
+  _builder.addShapeConstr(lhs_index,
+                          asTensorInfo(_ctx.at(lhs_index).shape(), _ctx.at(lhs_index).type(),
+                                       _ctx.at(lhs_index).scale(), _ctx.at(lhs_index).zeroPoint()));
+  _builder.addShapeConstr(rhs_index,
+                          asTensorInfo(_ctx.at(rhs_index).shape(), _ctx.at(rhs_index).type(),
+                                       _ctx.at(rhs_index).scale(), _ctx.at(rhs_index).zeroPoint()));
 
   // Construct operation parameters
   struct Param
@@ -3303,12 +3329,6 @@ public:
 public:
   void addShapeConstr(const ::internal::tflite::operand::Index &ind,
                       const ::arm_compute::TensorInfo &info) override;
-  void addShapeConstr(const ::internal::tflite::operand::Index &lhs_ind,
-                      const ::internal::tflite::operand::Object &lhs_obj,
-                      const nnfw::util::tensor::Shape &lhs_shape,
-                      const ::internal::tflite::operand::Index &rhs_ind,
-                      const ::internal::tflite::operand::Object &rhs_obj,
-                      const nnfw::util::tensor::Shape &rhs_shape) override;
 
 public:
   void addSubsumptionConstr(const ::internal::tflite::operand::Index &ind,
@@ -3326,12 +3346,6 @@ public:
 public:
   void finalize(void) const;
 
-public:
-  std::map<int, ::internal::tflite::operand::Shape> &shapeForBroadcast(void)
-  {
-    return _broadcasting_tensor_shape;
-  }
-
 private:
   ::internal::arm_compute::Plan &_plan;
 
@@ -3365,7 +3379,6 @@ private:
   std::map<int, std::shared_ptr<Subsumption>> _subsumption_ctx;
   std::map<int, Initializer> _initializer_ctx;
   std::vector<Stage> _stages;
-  std::map<int, ::internal::tflite::operand::Shape> _broadcasting_tensor_shape;
 };
 
 void PlanBuilder::addShapeConstr(const ::internal::tflite::operand::Index &ind,
@@ -3374,50 +3387,6 @@ void PlanBuilder::addShapeConstr(const ::internal::tflite::operand::Index &ind,
   _tensor_info_ctx[ind.asInt()] = info;
 }
 
-// Add tensor shape constraints considering broadcasting
-void PlanBuilder::addShapeConstr(const ::internal::tflite::operand::Index &lhs_ind,
-                                 const ::internal::tflite::operand::Object &lhs_obj,
-                                 const nnfw::util::tensor::Shape &lhs_shape,
-                                 const ::internal::tflite::operand::Index &rhs_ind,
-                                 const ::internal::tflite::operand::Object &rhs_obj,
-                                 const nnfw::util::tensor::Shape &rhs_shape)
-{
-  // right-side broadcasting
-  if (lhs_shape.rank() > rhs_shape.rank())
-  {
-    // ACL tensor info
-    _tensor_info_ctx[lhs_ind.asInt()] =
-        asTensorInfo(lhs_shape, lhs_obj.type(), lhs_obj.scale(), lhs_obj.zeroPoint());
-    _tensor_info_ctx[rhs_ind.asInt()] = asTensorInfoForBroadcast(
-        rhs_shape, rhs_obj.type(), lhs_shape.rank(), rhs_obj.scale(), rhs_obj.zeroPoint());
-
-    // TFlite broadcasting tensor shape
-    if (lhs_shape.rank() == 4)
-      _broadcasting_tensor_shape.emplace(rhs_ind.asInt(),
-                                         asTensorShapeForTFLiteBroadcast(rhs_shape));
-  }
-  // left-side broadcasting
-  else if (lhs_shape.rank() < rhs_shape.rank())
-  {
-    _tensor_info_ctx[lhs_ind.asInt()] = asTensorInfoForBroadcast(
-        lhs_shape, lhs_obj.type(), rhs_shape.rank(), lhs_obj.scale(), lhs_obj.zeroPoint());
-    _tensor_info_ctx[rhs_ind.asInt()] =
-        asTensorInfo(rhs_shape, rhs_obj.type(), rhs_obj.scale(), rhs_obj.zeroPoint());
-
-    if (rhs_shape.rank() == 4)
-      _broadcasting_tensor_shape.emplace(lhs_ind.asInt(),
-                                         asTensorShapeForTFLiteBroadcast(lhs_shape));
-  }
-  // no broadcasting
-  else
-  {
-    _tensor_info_ctx[lhs_ind.asInt()] =
-        asTensorInfo(lhs_shape, lhs_obj.type(), lhs_obj.scale(), lhs_obj.zeroPoint());
-    _tensor_info_ctx[rhs_ind.asInt()] =
-        asTensorInfo(rhs_shape, rhs_obj.type(), rhs_obj.scale(), rhs_obj.zeroPoint());
-  }
-}
-
 void PlanBuilder::addSubsumptionConstr(const ::internal::tflite::operand::Index &ind,
                                        const ::internal::tflite::operand::Index &base,
                                        const ::arm_compute::Coordinates &offset,
@@ -3607,20 +3576,12 @@ void PlanBuilder::finalize(void) const
       auto type = operands.at(operand_idx).type();
       auto shape = operands.at(operand_idx).shape();
 
-      auto it = _broadcasting_tensor_shape.find(operand_idx.asInt());
-      if (it != _broadcasting_tensor_shape.end())
-      {
-        rank = 4;
-        shape = it->second;
-      }
-
       switch (rank)
       {
         case 0: // scalar
         {
           switch (type)
           {
-            // NOTE In the case of broadcast, rank is 0 but it may be tensor type.
             case ANEURALNETWORKS_FLOAT32:
             case ANEURALNETWORKS_TENSOR_FLOAT32:
             {
@@ -3847,8 +3808,6 @@ int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
 
   plan_builder.finalize();
 
-  compilation->setShapeForBroadcast(plan_builder.shapeForBroadcast());
-
   return ANEURALNETWORKS_NO_ERROR;
 }
 
index 342e8be..80e1dea 100644 (file)
@@ -18,25 +18,11 @@ public:
 
 public:
   void publish(std::shared_ptr<const internal::arm_compute::Plan> &plan) { plan = _plan; }
-  void publish(std::shared_ptr<const std::map<int, ::internal::tflite::operand::Shape>>
-                   &broadcasting_tensor_shape)
-  {
-    broadcasting_tensor_shape = _broadcasting_tensor_shape;
-  }
-  void
-  setShapeForBroadcast(std::map<int, ::internal::tflite::operand::Shape> &broadcasting_tensor_shape)
-  {
-    _broadcasting_tensor_shape =
-        std::make_shared<const std::map<int, ::internal::tflite::operand::Shape>>(
-            broadcasting_tensor_shape);
-  }
   bool isFinished(void) { return _isFinished; }
   void markAsFinished() { _isFinished = true; }
 
 private:
   std::shared_ptr<internal::arm_compute::Plan> _plan;
-  std::shared_ptr<const std::map<int, ::internal::tflite::operand::Shape>>
-      _broadcasting_tensor_shape;
   bool _isFinished{false};
 };
 
index 66b0e77..b1c287b 100644 (file)
@@ -302,11 +302,6 @@ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
   }
   *execution = execution_ptr;
 
-  std::shared_ptr<const std::map<int, ::internal::tflite::operand::Shape>>
-      broadcasting_tensor_shape;
-  compilation->publish(broadcasting_tensor_shape);
-  (*execution)->setShapeForBroadcast(broadcasting_tensor_shape);
-
   return ANEURALNETWORKS_NO_ERROR;
 }
 
@@ -341,16 +336,6 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32
   auto shape = operands.at(operand_index).shape();
   auto rank = shape.rank();
 
-  if (execution->shapeForBroadcast() != nullptr)
-  {
-    auto it = execution->shapeForBroadcast()->find(operand_index.asInt());
-    if (it != execution->shapeForBroadcast()->end())
-    {
-      rank = 4;
-      shape = it->second;
-    }
-  }
-
   if (rank == 1)
   {
     const auto len = shape.dim(0);
index 015b3cd..4a43378 100644 (file)
@@ -17,21 +17,9 @@ public:
 
 public:
   const internal::arm_compute::Plan &plan(void) const { return *_plan; }
-  std::shared_ptr<const std::map<int, ::internal::tflite::operand::Shape>> shapeForBroadcast(void)
-  {
-    return _broadcasting_tensor_shape;
-  }
-  void setShapeForBroadcast(
-      const std::shared_ptr<const std::map<int, ::internal::tflite::operand::Shape>>
-          &broadcasting_tensor_shape)
-  {
-    _broadcasting_tensor_shape = broadcasting_tensor_shape;
-  }
 
 private:
   std::shared_ptr<const internal::arm_compute::Plan> _plan;
-  std::shared_ptr<const std::map<int, ::internal::tflite::operand::Shape>>
-      _broadcasting_tensor_shape = nullptr;
 
 public:
   // TODO Use InputIndex instead of int
index d892eaf..f7c1d3f 100644 (file)
@@ -71,6 +71,15 @@ nnfw::util::kernel::Shape Shape::asKernel(void) const
   return nnfw::util::kernel::Shape(count, depth, height, width);
 }
 
+// Extended dimension is filled with 1.
+void Shape::extendRank(size_t to_rank)
+{
+  for (int i = rank() + 1; i <= to_rank; ++i)
+  {
+    prepend(1);
+  }
+}
+
 } // namespace operand
 } // namespace tflite
 } // namespace internal
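
For reference, the new Shape::extendRank() fills every added leading dimension with 1, which matches the usual broadcasting convention of treating missing leading dimensions as size 1. A brief usage sketch, reusing the Shape(rank) constructor and writable dim() accessor that also appear in the code removed from Cast.h; treat it as illustrative only:

  ::internal::tflite::operand::Shape shape(1);
  shape.dim(0) = 3;    // shape is {3}
  shape.extendRank(4); // shape becomes {1, 1, 1, 3}
  // shape.rank() == 4 and shape.dim(3) == 3 afterwards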
index 86bbe66..e8a3799 100644 (file)
@@ -53,6 +53,9 @@ public:
   nnfw::util::matrix::Shape asMatrix(void) const;
   nnfw::util::kernel::Shape asKernel(void) const;
   nnfw::util::tensor::Shape asTensor(void) const;
+
+public:
+  void extendRank(size_t);
 };
 
 } // namespace operand
index 9872456..ae5e39f 100644 (file)
   }
 }
 
-// ACL Broadcasting style in case of NHWC
-// TODO HCHW
-::arm_compute::TensorShape asTensorShapeForBroadcast(const nnfw::util::tensor::Shape &shape,
-                                                     const size_t baseRank)
-{
-  // The cases that large rank(baseRank) is 4 and small rank is less than 4 need to transform to
-  // broadcasting TensorInfo because order is different.
-  if (baseRank == 4)
-  {
-    if (shape.rank() == 0)
-    {
-      return ::arm_compute::TensorShape(1);
-    }
-    else if (shape.rank() == 1)
-    {
-      return ::arm_compute::TensorShape(1, 1, shape.dim(0), 1);
-    }
-    else if (shape.rank() == 2)
-    {
-      return ::arm_compute::TensorShape(shape.dim(0), 1, shape.dim(1), 1); // w c -> w h c n
-    }
-    else if (shape.rank() == 3)
-    {
-      return ::arm_compute::TensorShape(shape.dim(1), shape.dim(0), shape.dim(2),
-                                        1); // h w c -> w h c n
-    }
-    else if (shape.rank() == 4)
-    {
-      assert(shape.dim(0) ==
-             1); // In case of ADD, SUB, MUL and DIV at ACL OpenCL, 3D inputs are supported.
-      return ::arm_compute::TensorShape(shape.dim(2), shape.dim(1), shape.dim(3),
-                                        shape.dim(0)); // n h w c -> W H C N
-    }
-    else
-    {
-      throw std::runtime_error("Not supported, yet");
-    }
-  }
-  // Other cases that larger rank <= 3 don't need to transform because broadcast shape is the same
-  // as orignal. For example, ::arm_compute::TensorShape(shape.dim(0), 1, 1) ==
-  // ::arm_compute::TensorShape(shape.dim(0).
-  else
-  {
-    return asTensorShape(shape);
-  }
-}
-
-// TFLite broadcasting style: used for reading an input as broadcasting shape
-internal::tflite::operand::Shape
-asTensorShapeForTFLiteBroadcast(const nnfw::util::tensor::Shape &shape)
-{
-  internal::tflite::operand::Shape broadcastShape(4);
-  if (shape.rank() == 0)
-  {
-    broadcastShape.dim(0) = 1;
-    broadcastShape.dim(1) = 1;
-    broadcastShape.dim(2) = 1;
-    broadcastShape.dim(3) = 1;
-  }
-  else if (shape.rank() == 1)
-  {
-    broadcastShape.dim(0) = 1;
-    broadcastShape.dim(1) = 1;
-    broadcastShape.dim(2) = 1;
-    broadcastShape.dim(3) = shape.dim(0);
-  }
-  else if (shape.rank() == 2)
-  {
-    broadcastShape.dim(0) = 1;
-    broadcastShape.dim(1) = 1;
-    broadcastShape.dim(2) = shape.dim(0);
-    broadcastShape.dim(3) = shape.dim(1);
-  }
-  else if (shape.rank() == 3)
-  {
-    broadcastShape.dim(0) = 1;
-    broadcastShape.dim(1) = shape.dim(0);
-    broadcastShape.dim(2) = shape.dim(1);
-    broadcastShape.dim(3) = shape.dim(2);
-  }
-  else
-  {
-    throw std::runtime_error("Not supported, yet");
-  }
-  return broadcastShape;
-}
-
 inline ::arm_compute::TensorShape asTensorShape(const internal::tflite::operand::Shape &shape)
 {
   const uint32_t rank = shape.rank();
@@ -236,15 +149,6 @@ inline ::arm_compute::TensorShape asTensorShape(const internal::tflite::operand:
                                    asQuantizationInfo(scale, zeroPoint));
 }
 
-::arm_compute::TensorInfo asTensorInfoForBroadcast(const nnfw::util::tensor::Shape &shape,
-                                                   const int32_t type, const size_t baseRank,
-                                                   const float scale = 0.0f,
-                                                   const int32_t zeroPoint = 0)
-{
-  return ::arm_compute::TensorInfo(asTensorShapeForBroadcast(shape, baseRank), 1, asDataType(type),
-                                   asQuantizationInfo(scale, zeroPoint));
-}
-
 ::arm_compute::TensorInfo asTensorInfo(int32_t size, const int32_t type, const float scale = 0.0f,
                                        const int32_t zeroPoint = 0)
 {