[neurun] Support Tracing in LinearExecutor (#9183)
author이한종/On-Device Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Tue, 26 Nov 2019 04:27:33 +0000 (13:27 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Tue, 26 Nov 2019 04:27:33 +0000 (13:27 +0900)
Support Tracing in LinearExecutor

- Let LinearExecutor keep each function's element info (CodeWithInfo) along with the function itself
- Make use of the Observee object to notify execution observers (e.g. ChromeTracingObserver) around each job

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
runtime/neurun/core/src/compiler/CodeWithInfo.h [new file with mode: 0644]
runtime/neurun/core/src/compiler/ExecutorFactory.cc
runtime/neurun/core/src/compiler/Linear.h
runtime/neurun/core/src/exec/LinearExecutor.cc
runtime/neurun/core/src/exec/LinearExecutor.h

diff --git a/runtime/neurun/core/src/compiler/CodeWithInfo.h b/runtime/neurun/core/src/compiler/CodeWithInfo.h
new file mode 100644 (file)
index 0000000..73dd105
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_COMPILER_CODE_WITH_INFO_H__
+#define __NEURUN_COMPILER_CODE_WITH_INFO_H__
+
+#include <memory>
+
+#include "compiler/Linear.h"
+#include "exec/IFunction.h"
+
+namespace neurun
+{
+namespace compiler
+{
+
+struct CodeWithInfo
+{
+  Linear::Element elem;
+  std::unique_ptr<exec::IFunction> fn;
+
+  CodeWithInfo(const Linear::Element &elem, std::unique_ptr<exec::IFunction> &&fn)
+      : elem{elem}, fn{std::move(fn)}
+  {
+  }
+};
+
+} // namespace compiler
+} // namespace neurun
+
+#endif // __NEURUN_COMPILER_CODE_WITH_INFO_H__
index afbf7c8..2cfb8a7 100644 (file)
@@ -32,6 +32,7 @@
 #include "backend/IShapeFixer.h"
 #include "backend/ITensorRegister.h"
 #include "cpp14/memory.h"
+#include "CodeWithInfo.h"
 
 namespace neurun
 {
@@ -61,9 +62,6 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
   auto operand_context = std::make_shared<OperandContext>();
   const auto &operands = graph.operands();
 
-  // Compilation result will be filled in operand_context and operation_sequence
-  auto function_sequence = std::make_shared<exec::FunctionSequence>();
-
   // linearize
   assert(!graph.isBuildingPhase());
   auto linear = nnfw::cpp14::make_unique<Linear>(graph);
@@ -120,30 +118,31 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
   class ExecutionBuilder final : public IExecutionBuilder
   {
   public:
-    ExecutionBuilder(exec::FunctionSequence &functions) : _functions{functions}
+    void append(std::unique_ptr<exec::IFunction> &&f) override
     {
-      // DO NOTHING
+      _code.emplace_back(_next_elem, std::move(f));
     }
 
-  public:
-    void append(std::unique_ptr<::neurun::exec::IFunction> &&f) override
-    {
-      _functions.append(std::move(f));
-    }
+    void setNextElem(const compiler::Linear::Element &next_elem) { _next_elem = next_elem; }
+    std::vector<CodeWithInfo> releaseCode() { return std::move(_code); }
 
   private:
-    exec::FunctionSequence &_functions;
+    compiler::Linear::Element _next_elem;
+    std::vector<CodeWithInfo> _code;
   };
 
-  auto execution_builder = nnfw::cpp14::make_unique<ExecutionBuilder>(*function_sequence);
+  ExecutionBuilder builder;
 
   // Generate kernels
   linear->iterate([&](const compiler::Linear::Element &element) {
     auto backend = element.lower_info->backend();
+    builder.setNextElem(element);
     auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen;
-    kernel_gen->generate(*element.subgraph, execution_builder.get());
+    kernel_gen->generate(*element.subgraph, &builder);
   });
 
+  auto code = builder.releaseCode();
+
   for (auto &tensor_builder : tensor_builders)
   {
     tensor_builder->allocateConsts();
@@ -157,15 +156,16 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
     graph.backend_resolver()->getBackendContext(backend)->constant_initializer->run();
   }
 
-  function_sequence->iterate([&](exec::IFunction &ifunc) {
+  for (auto &&e : code)
+  {
     // NOTE. It may need avoiding prepare() for some operations
     // Ref: https://github.sec.samsung.net/STAR/nnfw/issues/7326
-    ifunc.prepare();
+    e.fn->prepare();
     for (auto &tensor_builder : tensor_builders)
     {
       tensor_builder->postFunctionPrepare();
     }
-  });
+  }
 
   for (auto &tensor_builder : tensor_builders)
   {
@@ -193,9 +193,19 @@ exec::IExecutor *ExecutorFactory::createLinearExecutor(graph::Graph &graph)
     tensor_mgrs->insert(tensor_builder->releaseTensorManager());
   }
 
-  return new exec::LinearExecutor{graph.shareModel(),     graph.releaseSubgraphs(),
-                                  operand_context,        graph.releaseLowerInfo(),
-                                  std::move(tensor_mgrs), function_sequence};
+  auto exec =
+      new exec::LinearExecutor{graph.shareModel(),       graph.releaseSubgraphs(), operand_context,
+                               graph.releaseLowerInfo(), std::move(tensor_mgrs),   std::move(code)};
+
+  const std::string trace_filepath = util::getConfigString(util::config::TRACE_FILEPATH);
+  if (!trace_filepath.empty())
+  {
+    std::unique_ptr<exec::IExecutionObserver> ctp =
+        nnfw::cpp14::make_unique<exec::ChromeTracingObserver>(trace_filepath);
+    exec->addObserver(std::move(ctp));
+  }
+
+  return exec;
 }
 
 exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bool parallel)
index 2c3e470..89f12e8 100644 (file)
@@ -50,6 +50,8 @@ public:
     const model::Subgraph *subgraph;
     const graph::operation::LowerInfo *lower_info;
 
+    Element() : subgraph{nullptr}, lower_info{nullptr} {}
+
     Element(const model::Subgraph *subgraph, const graph::operation::LowerInfo *lower_info)
         : subgraph{subgraph}, lower_info{lower_info}
     {
index 35197a2..d87ec40 100644 (file)
@@ -21,7 +21,20 @@ namespace neurun
 namespace exec
 {
 
-void LinearExecutor::executeImpl() { _fn_seq->run(); }
+void LinearExecutor::executeImpl()
+{
+  _subject.notifyModelBegin(this);
+  for (auto &&code : _code)
+  {
+    // FIXME Assumes only one operation in a subgraph
+    const auto op = code.elem.subgraph->operations().at(0).node;
+    const auto backend = code.elem.lower_info->backend();
+    _subject.notifyJobBegin(this, op, backend);
+    code.fn->run();
+    _subject.notifyJobEnd(this, op, backend);
+  }
+  _subject.notifyModelEnd(this);
+}
 
 } // namespace exec
 } // namespace neurun
index 1369470..401e55f 100644 (file)
@@ -25,6 +25,7 @@
 #include "ExecutorBase.h"
 #include "compiler/Linear.h"
 #include "exec/FunctionSequence.h"
+#include "compiler/CodeWithInfo.h"
 
 namespace neurun
 {
@@ -47,10 +48,10 @@ public:
                  const std::shared_ptr<compiler::OperandContext> &operand_context,
                  std::unique_ptr<graph::LowerInfoMap> lower_info,
                  std::unique_ptr<backend::TensorManagerSet> tensor_mgrs,
-                 const std::shared_ptr<exec::FunctionSequence> &fn_seq)
+                 std::vector<compiler::CodeWithInfo> &&code)
       : ExecutorBase{model, std::move(subgraphs), operand_context, std::move(lower_info),
                      std::move(tensor_mgrs)},
-        _fn_seq{fn_seq}
+        _code{std::move(code)}
   {
   }
 
@@ -58,7 +59,7 @@ public:
   void executeImpl(void) override;
 
 private:
-  std::shared_ptr<exec::FunctionSequence> _fn_seq;
+  std::vector<compiler::CodeWithInfo> _code;
 };
 
 } // namespace exec