From d7f97bbf7b1c7e6bbb96f207e78605b1fd431b86 Mon Sep 17 00:00:00 2001
From: Hanjoung Lee/On-Device Lab(SR)/Engineer/Samsung Electronics
Date: Tue, 26 Nov 2019 13:27:33 +0900
Subject: [PATCH] [neurun] Support Tracing in LinearExecutor (#9183)

Support Tracing in LinearExecutor

- Let LinearExecutor keep each function's Linear::Element info along with the function
- Make use of the Observee object to notify execution observers

Signed-off-by: Hanjoung Lee
---
 runtime/neurun/core/src/compiler/CodeWithInfo.h  | 44 ++++++++++++++++++++
 .../neurun/core/src/compiler/ExecutorFactory.cc  | 48 +++++++++++++---------
 runtime/neurun/core/src/compiler/Linear.h        |  2 +
 runtime/neurun/core/src/exec/LinearExecutor.cc   | 15 ++++++-
 runtime/neurun/core/src/exec/LinearExecutor.h    |  7 ++--
 5 files changed, 93 insertions(+), 23 deletions(-)
 create mode 100644 runtime/neurun/core/src/compiler/CodeWithInfo.h

diff --git a/runtime/neurun/core/src/compiler/CodeWithInfo.h b/runtime/neurun/core/src/compiler/CodeWithInfo.h
new file mode 100644
index 0000000..73dd105
--- /dev/null
+++ b/runtime/neurun/core/src/compiler/CodeWithInfo.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_CODE_WITH_INFO_H__
+#define __NEURUN_COMPILER_CODE_WITH_INFO_H__
+
+#include <memory>
+
+#include "compiler/Linear.h"
+#include "exec/IFunction.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+struct CodeWithInfo
+{
+  Linear::Element elem;
+  std::unique_ptr<exec::IFunction> fn;
+
+  CodeWithInfo(const Linear::Element &elem, std::unique_ptr<exec::IFunction> &&fn)
+      : elem{elem}, fn{std::move(fn)}
+  {
+  }
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_CODE_WITH_INFO_H__
diff --git a/runtime/neurun/core/src/compiler/ExecutorFactory.cc b/runtime/neurun/core/src/compiler/ExecutorFactory.cc
index afbf7c8..2cfb8a7 100644
--- a/runtime/neurun/core/src/compiler/ExecutorFactory.cc
+++ b/runtime/neurun/core/src/compiler/ExecutorFactory.cc
@@ -32,6 +32,7 @@
 #include "backend/IShapeFixer.h"
 #include "backend/ITensorRegister.h"
 #include "cpp14/memory.h"
+#include "CodeWithInfo.h"
 
 namespace neurun
 {
@@ -61,9 +62,6 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
   auto operand_context = std::make_shared();
   const auto &operands = graph.operands();
 
-  // Compilation result will be filled in operand_context and operation_sequence
-  auto function_sequence = std::make_shared<exec::FunctionSequence>();
-
   // linearize
   assert(!graph.isBuildingPhase());
   auto linear = nnfw::cpp14::make_unique<Linear>(graph);
@@ -120,30 +118,31 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
   class ExecutionBuilder final : public IExecutionBuilder
   {
   public:
-    ExecutionBuilder(exec::FunctionSequence &functions) : _functions{functions}
+    void append(std::unique_ptr<exec::IFunction> &&f) override
     {
-      // DO NOTHING
+      _code.emplace_back(_next_elem, std::move(f));
     }
 
-  public:
-    void append(std::unique_ptr<::neurun::exec::IFunction> &&f) override
-    {
-      _functions.append(std::move(f));
-    }
+    void setNextElem(const compiler::Linear::Element &next_elem) { _next_elem = next_elem; }
+    std::vector<CodeWithInfo> releaseCode() { return std::move(_code); }
 
   private:
-    exec::FunctionSequence &_functions;
+    compiler::Linear::Element _next_elem;
+    std::vector<CodeWithInfo> _code;
   };
 
-  auto execution_builder = nnfw::cpp14::make_unique<ExecutionBuilder>(*function_sequence);
+  ExecutionBuilder builder;
 
   // Generate kernels
   linear->iterate([&](const compiler::Linear::Element &element) {
     auto backend = element.lower_info->backend();
+    builder.setNextElem(element);
     auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen;
-    kernel_gen->generate(*element.subgraph, execution_builder.get());
+    kernel_gen->generate(*element.subgraph, &builder);
   });
 
+  auto code = builder.releaseCode();
+
   for (auto &tensor_builder : tensor_builders)
   {
     tensor_builder->allocateConsts();
@@ -157,15 +156,16 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
     graph.backend_resolver()->getBackendContext(backend)->constant_initializer->run();
   }
 
-  function_sequence->iterate([&](exec::IFunction &ifunc) {
+  for (auto &&e : code)
+  {
     // NOTE. It may need avoiding prepare() for some operations
     // Ref: https://github.sec.samsung.net/STAR/nnfw/issues/7326
-    ifunc.prepare();
+    e.fn->prepare();
     for (auto &tensor_builder : tensor_builders)
     {
       tensor_builder->postFunctionPrepare();
     }
-  });
+  }
 
   for (auto &tensor_builder : tensor_builders)
   {
@@ -193,9 +193,19 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
     tensor_mgrs->insert(tensor_builder->releaseTensorManager());
   }
 
-  return new exec::LinearExecutor{graph.shareModel(), graph.releaseSubgraphs(),
-                                  operand_context, graph.releaseLowerInfo(),
-                                  std::move(tensor_mgrs), function_sequence};
+  auto exec =
+      new exec::LinearExecutor{graph.shareModel(), graph.releaseSubgraphs(), operand_context,
+                               graph.releaseLowerInfo(), std::move(tensor_mgrs), std::move(code)};
+
+  const std::string trace_filepath = util::getConfigString(util::config::TRACE_FILEPATH);
+  if (!trace_filepath.empty())
+  {
+    std::unique_ptr ctp =
+        nnfw::cpp14::make_unique(trace_filepath);
+    exec->addObserver(std::move(ctp));
+  }
+
+  return exec;
 }
 
 exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bool parallel)
diff --git a/runtime/neurun/core/src/compiler/Linear.h b/runtime/neurun/core/src/compiler/Linear.h
index 2c3e470..89f12e8 100644
--- a/runtime/neurun/core/src/compiler/Linear.h
+++ b/runtime/neurun/core/src/compiler/Linear.h
@@ -50,6 +50,8 @@ public:
     const model::Subgraph *subgraph;
     const graph::operation::LowerInfo *lower_info;
 
+    Element() : subgraph{nullptr}, lower_info{nullptr} {}
+
     Element(const model::Subgraph *subgraph, const graph::operation::LowerInfo *lower_info)
         : subgraph{subgraph}, lower_info{lower_info}
     {
diff --git a/runtime/neurun/core/src/exec/LinearExecutor.cc b/runtime/neurun/core/src/exec/LinearExecutor.cc
index 35197a2..d87ec40 100644
--- a/runtime/neurun/core/src/exec/LinearExecutor.cc
+++ b/runtime/neurun/core/src/exec/LinearExecutor.cc
@@ -21,7 +21,20 @@ namespace neurun
 namespace exec
 {
 
-void LinearExecutor::executeImpl() { _fn_seq->run(); }
+void LinearExecutor::executeImpl()
+{
+  _subject.notifyModelBegin(this);
+  for (auto &&code : _code)
+  {
+    // FIXME Assumes only one operation in a subgraph
+    const auto op = code.elem.subgraph->operations().at(0).node;
+    const auto backend = code.elem.lower_info->backend();
+    _subject.notifyJobBegin(this, op, backend);
+    code.fn->run();
+    _subject.notifyJobEnd(this, op, backend);
+  }
+  _subject.notifyModelEnd(this);
+}
 
 } // namespace exec
 } // namespace neurun
diff --git a/runtime/neurun/core/src/exec/LinearExecutor.h b/runtime/neurun/core/src/exec/LinearExecutor.h
index 1369470..401e55f 100644
--- a/runtime/neurun/core/src/exec/LinearExecutor.h
+++ b/runtime/neurun/core/src/exec/LinearExecutor.h
@@ -25,6 +25,7 @@
 #include "ExecutorBase.h"
 #include "compiler/Linear.h"
 #include "exec/FunctionSequence.h"
+#include "compiler/CodeWithInfo.h"
 
 namespace neurun
 {
@@ -47,10 +48,10 @@ public:
                  const std::shared_ptr &operand_context,
                  std::unique_ptr lower_info,
                  std::unique_ptr tensor_mgrs,
-                 const std::shared_ptr<exec::FunctionSequence> &fn_seq)
+                 std::vector<compiler::CodeWithInfo> &&code)
       : ExecutorBase{model, std::move(subgraphs), operand_context, std::move(lower_info),
                      std::move(tensor_mgrs)},
-        _fn_seq{fn_seq}
+        _code{std::move(code)}
   {
   }
 
@@ -58,7 +59,7 @@ public:
   void executeImpl(void) override;
 
 private:
-  std::shared_ptr<exec::FunctionSequence> _fn_seq;
+  std::vector<compiler::CodeWithInfo> _code;
 };
 
 } // namespace exec
-- 
2.7.4
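
The shape of this change, stripped of neurun specifics, is: pair every generated function with the info that describes it (CodeWithInfo), then bracket each run() with begin/end notifications so any registered observer can time the job; the observer is only attached when TRACE_FILEPATH is set. The following standalone C++ sketch illustrates that pattern. All names in it (IObserver, TraceObserver, Subject, Code, run_linear) are hypothetical stand-ins for illustration, not the actual neurun classes such as the Observee or CodeWithInfo used above.

// Minimal sketch of the trace-via-observer pattern, with hypothetical names.
#include <chrono>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Observer interface: mirrors the notifyJobBegin/notifyJobEnd pair in executeImpl().
struct IObserver
{
  virtual ~IObserver() = default;
  virtual void handleBegin(const std::string &op) = 0;
  virtual void handleEnd(const std::string &op) = 0;
};

// A toy tracing observer: prints per-operation durations instead of writing a trace file.
class TraceObserver final : public IObserver
{
public:
  void handleBegin(const std::string &) override { _begin = std::chrono::steady_clock::now(); }
  void handleEnd(const std::string &op) override
  {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - _begin)
                  .count();
    std::cout << op << " took " << us << " us\n";
  }

private:
  std::chrono::steady_clock::time_point _begin;
};

// Subject: fans each notification out to every registered observer (cf. _subject/addObserver).
class Subject
{
public:
  void add(std::unique_ptr<IObserver> o) { _observers.push_back(std::move(o)); }
  void notifyBegin(const std::string &op)
  {
    for (auto &o : _observers)
      o->handleBegin(op);
  }
  void notifyEnd(const std::string &op)
  {
    for (auto &o : _observers)
      o->handleEnd(op);
  }

private:
  std::vector<std::unique_ptr<IObserver>> _observers;
};

// Analogue of CodeWithInfo: the function to run plus the info needed to label the trace entry.
struct Code
{
  std::string op_name;      // stands in for Linear::Element's operation/backend info
  std::function<void()> fn; // stands in for exec::IFunction::run()
};

// Analogue of LinearExecutor::executeImpl(): run each function, bracketed by notifications.
void run_linear(Subject &subject, const std::vector<Code> &code)
{
  for (const auto &c : code)
  {
    subject.notifyBegin(c.op_name);
    c.fn();
    subject.notifyEnd(c.op_name);
  }
}

int main()
{
  Subject subject;
  // Register the tracing observer only when tracing is wanted, as the factory does.
  subject.add(std::make_unique<TraceObserver>());

  std::vector<Code> code{{"Conv2D",
                          [] {
                            volatile long s = 0;
                            for (long i = 0; i < 1000000; ++i)
                              s = s + i; // dummy work so the timing is visible
                          }},
                         {"ReLU", [] {}}};
  run_linear(subject, code);
  return 0;
}

Keeping the element next to the function, rather than a bare FunctionSequence, is what lets executeImpl() tell observers which operation and backend each timed job belongs to.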