From 4a7fd026d8b7cfa3fcd1ed9f33f0a5466f62c864 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=9D=B4=ED=95=9C=EC=A2=85/On-Device=20Lab=28SR=29/Enginee?= =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Tue, 19 Nov 2019 16:55:08 +0900 Subject: [PATCH] [neurun] Fix for const model output (again) (#9002) * [neurun] Fix for const model output (again) Handle the case properly when a model has const model outputs. This makes it not crash; however, the results are still incorrect since Const values are not initialized yet. Signed-off-by: Hanjoung Lee * Fix failure It is wrong to assume just one use for inputs. * Merge fix --- runtime/neurun/core/src/compiler/Linear.cc | 26 +++++++++++++------ runtime/neurun/core/src/graph/Graph.cc | 36 +++++++++++--------------- tests/tools/nnpackage_run/src/nnpackage_run.cc | 7 ----- 3 files changed, 33 insertions(+), 36 deletions(-) diff --git a/runtime/neurun/core/src/compiler/Linear.cc b/runtime/neurun/core/src/compiler/Linear.cc index 901abbc..126afeb 100644 --- a/runtime/neurun/core/src/compiler/Linear.cc +++ b/runtime/neurun/core/src/compiler/Linear.cc @@ -207,12 +207,18 @@ void Linear::planTensors() // NOTE This assumes an operand can have one layout, and only Permutate can have // different layouts for input and output const auto &def = *obj.getDef().list().cbegin(); - auto frontend_layout = subgraphs.at(subgraphs.getOperation(def)).getLayout(); - if (frontend_layout == model::Layout::UNKNOWN) + auto frontend_layout = model::Layout::UNKNOWN; + + if (subgraphs.containsOperation(def)) { - const auto &use = *obj.getUses().list().cbegin(); - frontend_layout = subgraphs.at(subgraphs.getOperation(use)).getLayout(); + frontend_layout = subgraphs.at(subgraphs.getOperation(def)).getLayout(); + if (frontend_layout == model::Layout::UNKNOWN) + { + const auto &use = *obj.getUses().list().cbegin(); + frontend_layout = subgraphs.at(subgraphs.getOperation(use)).getLayout(); + } } + const auto backend_layout = 
lower_info->def_factors().getOnlyElement().layout(); tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, is_const); } @@ -285,15 +291,19 @@ void Linear::planTensors() for (const auto &ind : _graph.getOutputs()) { --uses_map[ind]; - assert(uses_map[ind] == 0); - tensor_builder_map[ind]->notifyLastUse(ind); + if (uses_map[ind] == 0) // To prevent notifyLastUse from being called twice + { + tensor_builder_map[ind]->notifyLastUse(ind); + } } for (const auto &ind : constants) { --uses_map[ind]; - assert(uses_map[ind] == 0); - tensor_builder_map[ind]->notifyLastUse(ind); + if (uses_map[ind] == 0) // To prevent notifyLastUse from being called twice + { + tensor_builder_map[ind]->notifyLastUse(ind); + } } assert(std::all_of( diff --git a/runtime/neurun/core/src/graph/Graph.cc b/runtime/neurun/core/src/graph/Graph.cc index 91370b4..53ed3d1 100644 --- a/runtime/neurun/core/src/graph/Graph.cc +++ b/runtime/neurun/core/src/graph/Graph.cc @@ -340,22 +340,26 @@ void Graph::lower(void) _subgraphs->dump("merged and sorted operations without permutation"); -// NOTE This is desired way to handle model input and outputs however getDefaultBackend() is -// cpu backend dependent for now we cannot use it. 
-#if 0 - // Add def backend to model input/output operand as default backend - for (auto index : getInputs()) + const auto default_backend = backend::BackendManager::get().getDefault(); + for (auto index : _model->inputs) { + // Pick just any one from the uses, here the first one is chosen + // For the other uses, Permute operations will be inserted later auto &&lower_info = operands_lower_info.at(index); - lower_info->addDefBackend(_backend_resolver->getDefaultBackend()); + assert(lower_info->use_factors().size() > 0); + lower_info->addDefPermuteFactor(*lower_info->use_factors().begin()); } - - for (auto index : getOutputs()) + for (auto index : _model->outputs) { auto &&lower_info = operands_lower_info.at(index); - lower_info->addUseBackend(_backend_resolver->getDefaultBackend()); + if (_model->operands.at(index).isConstant()) + { + lower_info->addDefPermuteFactor(operand::PermuteFactor{ + default_backend, + model::Layout::NHWC // TODO Get frontend layout of this node from IR + }); + } } -#endif // Add DefFactor constants same as UseFactor // NOTE This assumes a constant operand is used by only one operation @@ -366,18 +370,8 @@ void Graph::lower(void) auto &&lower_info = operands_lower_info.at(operand); if (lower_info->def_factors().empty()) { - // NOTE Handling model inputs here is not ideal. See above NOTE comment. 
- // If it is a model input, not a constant - if (_model->inputs.contains(operand)) - { - // If one or more elements then any PermuteFactor is OK so pick first one - if (!lower_info->use_factors().empty()) - { - lower_info->addDefPermuteFactor(*lower_info->use_factors().begin()); - } - } // If it is a constant - else + if (!_model->inputs.contains(operand)) { lower_info->addDefPermuteFactor(lower_info->use_factors().getOnlyElement()); } diff --git a/tests/tools/nnpackage_run/src/nnpackage_run.cc b/tests/tools/nnpackage_run/src/nnpackage_run.cc index d6e71bc..5442898 100644 --- a/tests/tools/nnpackage_run/src/nnpackage_run.cc +++ b/tests/tools/nnpackage_run/src/nnpackage_run.cc @@ -112,13 +112,6 @@ int main(const int argc, char **argv) // verify input and output - if (num_inputs == 0) - { - std::cerr << "[ ERROR ] " - << "No inputs in model => execution is not possible" << std::endl; - exit(1); - } - auto verifyInputTypes = [session]() { uint32_t sz; NNPR_ENSURE_STATUS(nnfw_input_size(session, &sz)); -- 2.7.4