Implement LogicalOr op in PACL (#3944)
author Shubham Gupta/SNAP /SRI-Bangalore/Engineer/삼성전자 <shub98.gupta@samsung.com>
Mon, 10 Dec 2018 01:53:41 +0000 (07:23 +0530)
committer 이춘석/동작제어Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Mon, 10 Dec 2018 01:53:41 +0000 (10:53 +0900)
This patch adds an implementation of the LogicalOr op
in PACL.

Signed-off-by: shubham <shub98.gupta@samsung.com>
runtimes/pure_arm_compute/src/compilation.cc

index 34cb1f8..ee747c7 100644 (file)
@@ -65,6 +65,7 @@
 #include <arm_compute/runtime/CL/functions/CLSquaredDifference.h>
 #include <arm_compute/runtime/CL/functions/CLNeg.h>
 #include <arm_compute/runtime/CL/functions/CLNotEqual.h>
+#include <arm_compute/runtime/CL/functions/CLBinaryLogicalOp.h>
 
 #include <arm_compute/runtime/SubTensor.h>
 #include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
@@ -5593,8 +5594,69 @@ void Planner::visit(const ::internal::tflite::op::LogicalOr::Node &node)
 {
   VERBOSE(LogicalOr) << "Configure LogicalOr operation" << std::endl;
 
-  // TODO Implement LogicalOr op
-  throw std::runtime_error("Not supported, yet");
+  const ::internal::tflite::operand::Index output_index{node.param().output_index};
+  const ::internal::tflite::operand::Index input1_index{node.param().input1_index};
+  const ::internal::tflite::operand::Index input2_index{node.param().input2_index};
+
+  // Set Shape Constraints and TensorInfo
+  _builder.addShapeConstr(output_index,
+                          asTensorInfo(asTensorShape(_ctx.at(output_index).shape(), false),
+                                       _ctx.at(output_index).type(), _ctx.at(output_index).scale(),
+                                       _ctx.at(output_index).zeroPoint()));
+
+  if (!(_ctx.at(input1_index).shape() == _ctx.at(input2_index).shape()))
+  {
+    const auto broadcast_rank =
+        std::max(_ctx.at(input1_index).shape().rank(), _ctx.at(input2_index).shape().rank());
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(input1_index).shape())
+        .extendRank(broadcast_rank);
+    const_cast<::internal::tflite::operand::Shape &>(_ctx.at(input2_index).shape())
+        .extendRank(broadcast_rank);
+  }
+
+  _builder.addShapeConstr(input1_index,
+                          asTensorInfo(asTensorShape(_ctx.at(input1_index).shape(), false),
+                                       _ctx.at(input1_index).type(), _ctx.at(input1_index).scale(),
+                                       _ctx.at(input1_index).zeroPoint()));
+  _builder.addShapeConstr(input2_index,
+                          asTensorInfo(asTensorShape(_ctx.at(input2_index).shape(), false),
+                                       _ctx.at(input2_index).type(), _ctx.at(input2_index).scale(),
+                                       _ctx.at(input2_index).zeroPoint()));
+
+  // Construct operation parameters
+  struct Param
+  {
+    int output_index;
+    int input1_index;
+    int input2_index;
+  };
+
+  Param param;
+
+  param.output_index = output_index.asInt();
+  param.input1_index = input1_index.asInt();
+  param.input2_index = input2_index.asInt();
+  auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
+    auto output_alloc = ctx.at(::internal::tflite::operand::Index{param.output_index});
+    auto input1_alloc = ctx.at(::internal::tflite::operand::Index{param.input1_index});
+    auto input2_alloc = ctx.at(::internal::tflite::operand::Index{param.input2_index});
+    if (::internal::arm_compute::isGpuMode())
+    {
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLBinaryLogicalOp>();
+
+      fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc),
+                    ::arm_compute::LogicalOperation::OR);
+
+      builder.append("LogicalOr", std::move(fn));
+    }
+    else
+    {
+      // TODO Add NEON support
+
+      throw std::runtime_error("Not supported yet");
+    }
+  };
+  _builder.addStage(stage);
 }
 
 class AllocationContext final : public IAllocationContext