From 6844b001dcfd1c4f637e338cfb70873fa3ec394b Mon Sep 17 00:00:00 2001
From: Dilshodzhon Poshshoev/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Thu, 28 Nov 2019 11:16:55 +0300
Subject: [PATCH] [neurun/cpu] Fix broadcast handling in HEScheduler (#9258)

Broadcast for Mul/Sub/Add isn't supported on the cpu backend. Add them to the
skip condition and throw an exception in ShapeFixer.

Signed-off-by: Poshshoev Dilshodzhon
---
 runtime/neurun/backend/cpu/ShapeFixer.cc        |  6 ++++++
 runtime/neurun/core/src/compiler/HEScheduler.cc | 13 ++++++-------
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/runtime/neurun/backend/cpu/ShapeFixer.cc b/runtime/neurun/backend/cpu/ShapeFixer.cc
index 005ce89..d874138 100644
--- a/runtime/neurun/backend/cpu/ShapeFixer.cc
+++ b/runtime/neurun/backend/cpu/ShapeFixer.cc
@@ -90,6 +90,8 @@ void ShapeFixer::visit(const model::operation::Add &node)
   // Broadcast
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
+    // See issue #8553
+    throw std::runtime_error{"ShapeFixer: NYI for broadcast Add"};
     const auto broadcast_rank =
         std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
     const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
@@ -112,6 +114,8 @@ void ShapeFixer::visit(const model::operation::Sub &node)
   // Broadcast
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
+    // See issue #8553
+    throw std::runtime_error{"ShapeFixer: NYI for broadcast Sub"};
     const auto broadcast_rank =
         std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
     const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
@@ -132,6 +136,8 @@ void ShapeFixer::visit(const model::operation::Mul &node)
   // Broadcast
   if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
   {
+    // See issue #8553
+    throw std::runtime_error{"ShapeFixer: NYI for broadcast Mul"};
     const auto broadcast_rank =
         std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
     const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
diff --git a/runtime/neurun/core/src/compiler/HEScheduler.cc b/runtime/neurun/core/src/compiler/HEScheduler.cc
index b8129bb..f47ca86 100644
--- a/runtime/neurun/core/src/compiler/HEScheduler.cc
+++ b/runtime/neurun/core/src/compiler/HEScheduler.cc
@@ -68,16 +68,15 @@ static bool isWorkaroundSkip(const graph::Graph &graph, const backend::Backend *
     Adding exception in stage doesn't help.
     Because if there is a record for add without broadcast, scheduling will select it since it
     doesn't distinguish broadcast and non-broadcast like it does for quant non-quantized*/
-  if (backend->config()->id() == "cpu" && node.opcode() == model::OpCode::Add)
+  if (backend->config()->id() == "cpu" &&
+      (node.opcode() == model::OpCode::Add || node.opcode() == model::OpCode::Sub ||
+       node.opcode() == model::OpCode::Mul))
   {
     const auto lhs_index{node.getInputs().at(model::operation::Add::Input::LHS)};
     const auto rhs_index{node.getInputs().at(model::operation::Add::Input::RHS)};
-    /*Broadcasting isn't supported on CPU: no way to differ the existing exec_time record of
-     * Add with and without broadcasting*/
-    /*Quant is also unsupported: throws an exception in run(): in case of scheduling without warm-up
-     it isn't catched by tryBackend()*/
-    if (quant ||
-        !(graph.operands().at(lhs_index).shape() == graph.operands().at(rhs_index).shape()))
+    /*Broadcasting isn't supported on CPU: no way to differ the existing exec_time record with and
+     * without broadcasting*/
+    if (!(graph.operands().at(lhs_index).shape() == graph.operands().at(rhs_index).shape()))
     {
       return true;
     }
-- 
2.7.4
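
Illustrative sketch (not part of the applied diff): the scheduler-side change boils down to treating any Add/Sub/Mul whose operand shapes differ as a broadcast case and refusing to schedule it on the cpu backend. The following minimal, self-contained C++ sketch shows that check with simplified stand-ins: a std::vector-based Shape, a local OpCode enum, and a hypothetical shouldSkipOnCpu helper, not the actual neurun types or isWorkaroundSkip signature.

#include <string>
#include <vector>

// Stand-in for neurun's model::Shape; only shape equality matters here.
using Shape = std::vector<int>;

enum class OpCode { Add, Sub, Mul, Other };

// Broadcast is in play whenever the two operand shapes differ, which is the
// same test HEScheduler performs via operator== on model::Shape.
static bool isBroadcast(const Shape &lhs, const Shape &rhs) { return lhs != rhs; }

// Mirrors the intent of the patched skip condition: on the cpu backend, skip
// Add/Sub/Mul nodes whose inputs need broadcasting, because existing exec_time
// records cannot distinguish broadcast from non-broadcast executions.
static bool shouldSkipOnCpu(const std::string &backend_id, OpCode opcode, const Shape &lhs,
                            const Shape &rhs)
{
  const bool elementwise =
      opcode == OpCode::Add || opcode == OpCode::Sub || opcode == OpCode::Mul;
  return backend_id == "cpu" && elementwise && isBroadcast(lhs, rhs);
}

int main()
{
  // {1, 3} vs {2, 3} requires broadcasting, so a cpu Add would be skipped.
  Shape lhs{1, 3}, rhs{2, 3};
  return shouldSkipOnCpu("cpu", OpCode::Add, lhs, rhs) ? 0 : 1;
}

The same shape-inequality test is what the new throws in ShapeFixer guard: if a broadcast Add/Sub/Mul ever reaches the cpu backend despite the scheduler skip, the NYI exception surfaces the unsupported case explicitly instead of relying on a misleading exec_time record.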