[neurun/cpu] Fix broadcast handling in HEScheduler (#9258)
authorDilshodzhon Poshshoev/AI Tools Lab /SRR/Engineer/Samsung Electronics <d.poshshoev@samsung.com>
Thu, 28 Nov 2019 08:16:55 +0000 (11:16 +0300)
committer이한종/On-Device Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Thu, 28 Nov 2019 08:16:55 +0000 (17:16 +0900)
Broadcast for Mul/Sub/Add isn't supported on the cpu backend. Add these ops to the skip
condition and throw an exception in ShapeFixer

Signed-off-by: Poshshoev Dilshodzhon <d.poshshoev@samsung.com>
runtime/neurun/backend/cpu/ShapeFixer.cc
runtime/neurun/core/src/compiler/HEScheduler.cc

index 005ce89..d874138 100644 (file)
@@ -90,6 +90,8 @@ void ShapeFixer::visit(const model::operation::Add &node)
   // Broadcast
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
+    // See issue #8553
+    throw std::runtime_error{"ShapeFixer: NYI for broadcast Add"};
     const auto broadcast_rank =
         std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
     const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
@@ -112,6 +114,8 @@ void ShapeFixer::visit(const model::operation::Sub &node)
   // Broadcast
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
+    // See issue #8553
+    throw std::runtime_error{"ShapeFixer: NYI for broadcast Sub"};
     const auto broadcast_rank =
         std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
     const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
@@ -132,6 +136,8 @@ void ShapeFixer::visit(const model::operation::Mul &node)
   // Broadcast
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
+    // See issue #8553
+    throw std::runtime_error{"ShapeFixer: NYI for broadcast Mul"};
     const auto broadcast_rank =
         std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
     const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
index b8129bb..f47ca86 100644 (file)
@@ -68,16 +68,15 @@ static bool isWorkaroundSkip(const graph::Graph &graph, const backend::Backend *
       Adding exception in stage doesn't help. Because if there is a record for add without
       broadcast, scheduling will select it since it doesn't distinguish broadcast and
       non-broadcast like it does for quant non-quantized*/
-  if (backend->config()->id() == "cpu" && node.opcode() == model::OpCode::Add)
+  if (backend->config()->id() == "cpu" &&
+      (node.opcode() == model::OpCode::Add || node.opcode() == model::OpCode::Sub ||
+       node.opcode() == model::OpCode::Mul))
   {
     const auto lhs_index{node.getInputs().at(model::operation::Add::Input::LHS)};
     const auto rhs_index{node.getInputs().at(model::operation::Add::Input::RHS)};
-    /*Broadcasting isn't supported on CPU: no way to differ the existing exec_time record of
-     * Add with and without broadcasting*/
-    /*Quant is also unsupported: throws an exception in run(): in case of scheduling without warm-up
-      it isn't catched by tryBackend()*/
-    if (quant ||
-        !(graph.operands().at(lhs_index).shape() == graph.operands().at(rhs_index).shape()))
+    /*Broadcasting isn't supported on CPU: no way to differ the existing exec_time record with and
+     * without broadcasting*/
+    if (!(graph.operands().at(lhs_index).shape() == graph.operands().at(rhs_index).shape()))
     {
       return true;
     }