[neurun] Encapsulate Linear IR (#2350)
author이한종/동작제어Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Mon, 20 Aug 2018 02:55:08 +0000 (11:55 +0900)
committer박세희/동작제어Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Mon, 20 Aug 2018 02:55:08 +0000 (11:55 +0900)
So far Linear IR was a vector of Node elements but this commit
encapsulates it into a class. Also this commit introduces a new phase
for Graph `LINEARIZED` which indicates that the Graph IR is no longer
valid and all data structures are moved to `Linear` object. For now
nothing is moved yet, but it will be in the future.

Resolve #2320

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
runtimes/neurun/CMakeLists.txt
runtimes/neurun/src/compilation.cc
runtimes/neurun/src/graph/Graph.cc
runtimes/neurun/src/graph/Graph.h
runtimes/neurun/src/linear/Linear.cc [new file with mode: 0644]
runtimes/neurun/src/linear/Linear.h [new file with mode: 0644]

index df3a820..3dc0d03 100644 (file)
@@ -16,10 +16,11 @@ file(GLOB SOURCES "src/*.cc")
 file(GLOB SOURCES_FRONTEND "src/frontend/*.cc")
 file(GLOB_RECURSE SOURCES_INTERNAL "src/internal/*.cc")
 file(GLOB_RECURSE SOURCES_GRAPH "src/graph/*.cc")
+file(GLOB_RECURSE SOURCES_LINEAR "src/linear/*.cc")
 file(GLOB_RECURSE SOURCES_CODEGEN "src/codegen/*.cc")
 file(GLOB_RECURSE SOURCES_VERIFIER "src/verifier/*.cc")
 
-set(SOURCES ${SOURCES} ${SOURCES_FRONTEND} ${SOURCES_INTERNAL} ${SOURCES_GRAPH} ${SOURCES_CODEGEN} ${SOURCES_VERIFIER})
+set(SOURCES ${SOURCES} ${SOURCES_FRONTEND} ${SOURCES_INTERNAL} ${SOURCES_GRAPH} ${SOURCES_LINEAR} ${SOURCES_CODEGEN} ${SOURCES_VERIFIER})
 
 # NOTE For now ARMCompute is necessary
 # TODO Remove required package below(should be optional)
index d7da83d..f171899 100644 (file)
@@ -29,6 +29,8 @@
 #include "codegen/TensorMarker.h"
 #include "codegen/PlanBuilder.h"
 
+#include "linear/Linear.h"
+
 int ANeuralNetworksCompilation::finish()
 {
   arm_compute::CLScheduler::get().default_init();
@@ -37,35 +39,16 @@ int ANeuralNetworksCompilation::finish()
   const auto &operands = plan.model().operands();
 
   plan.model().lower();
-
-  // Get linearized ops
-  std::vector<const ::internal::tflite::op::Node *> operations;
-  {
-    plan.model().iteratePostDfs([&](const neurun::graph::operation::Node &node) {
-      auto op = node.op();
-      operations.emplace_back(op);
-      //      dynamic_cast<const ::internal::tflite::op::Conv2D::implicit::Node*>(op)
-    });
-
-    std::reverse(std::begin(operations), std::end(operations));
-  }
+  auto linear = plan.model().linearize();
 
   // Dump ops
-  for (const auto op : operations)
-  {
-    op->accept(neurun::codegen::Dumper{});
-  }
+  linear->accept(neurun::codegen::Dumper{});
 
   ::internal::BackendManager backend_manager{plan};
   neurun::codegen::BackendResolver backend_resolver{backend_manager};
   neurun::codegen::PlanBuilder plan_builder{plan};
 
-  for (uint32_t n = 0; n < operations.size(); ++n)
-  {
-    const auto &op = *operations.at(n);
-    auto tensor_builder = backend_resolver.getTensorBuilder(typeid(op));
-    op.accept(neurun::codegen::TensorMarker{*tensor_builder});
-  }
+  linear->markTensors(backend_resolver);
 
 #if 0 // Tensor Conversion disabled
   auto tensor_builders = backend_resolver.getAllTensorBuilders();
@@ -76,11 +59,7 @@ int ANeuralNetworksCompilation::finish()
   }
 #endif
 
-  for (uint32_t n = 0; n < operations.size(); ++n)
-  {
-    const auto &op = *operations.at(n);
-    op.accept(neurun::codegen::Planner{operands, plan_builder, backend_resolver});
-  }
+  linear->accept(neurun::codegen::Planner{operands, plan_builder, backend_resolver});
 
   // TODO Add optimization passes
   plan_builder.finalize(backend_resolver);
index 16d16aa..9d161e8 100644 (file)
@@ -5,6 +5,8 @@
 
 #include "logging.h"
 #include "verifier/IVerifier.h"
+#include "nnfw/std/memory.h"
+#include "linear/Linear.h"
 
 namespace neurun
 {
@@ -105,5 +107,18 @@ void Graph::iteratePostDfs(const std::function<void(const operation::Node &)> &f
   assert(std::all_of(visited.begin(), visited.end(), [](bool v) { return v; }));
 }
 
+std::unique_ptr<linear::Linear> Graph::linearize(void)
+{
+  assert(_phase == Phase::LOWERED);
+
+  auto linear = nnfw::make_unique<linear::Linear>(*this);
+
+  // TODO Move the operations and operands to linear object
+
+  _phase = Phase::LINEARIZED;
+
+  return std::move(linear);
+}
+
 } // namespace graph
 } // namespace neurun
index 823cd18..85db7df 100644 (file)
 
 namespace neurun
 {
+namespace linear
+{
+class Linear;
+} // namespace linear
+} // namespace neurun
+
+namespace neurun
+{
 namespace graph
 {
 
@@ -20,7 +28,8 @@ private:
   {
     BUILDING,
     MODEL,
-    LOWERED
+    LOWERED,
+    LINEARIZED // Everything is moved to Linear object so this Graph object is no longer effective
   };
 
 public:
@@ -36,6 +45,7 @@ public:
   void addOutput(const operand::Index &ind);
   void finishBuilding(void);
   void lower(void);
+  std::unique_ptr<linear::Linear> linearize(void);
   bool isBuildingPhase(void) { return _phase == Phase::BUILDING; }
 
   // Accessors
diff --git a/runtimes/neurun/src/linear/Linear.cc b/runtimes/neurun/src/linear/Linear.cc
new file mode 100644 (file)
index 0000000..73c1fb2
--- /dev/null
@@ -0,0 +1,49 @@
+#include "Linear.h"
+
+#include "graph/Graph.h"
+#include "internal/op/Node.h"
+
+#include "codegen/TensorMarker.h"
+#include "codegen/BackendResolver.h"
+
+namespace neurun
+{
+namespace linear
+{
+
+Linear::Linear(const graph::Graph &graph)
+{
+  // Linearize with topological sort
+  //
+  // Topological sort algorithm
+  //   1. Iterate with DFS
+  //   2. Append the node to vector when DFS for the node finishes(post order)
+  //   3. Reverse the order of nodes
+
+  graph.iteratePostDfs([&](const neurun::graph::operation::Node &node) {
+    auto op = node.op();
+    _operations.emplace_back(op);
+  });
+
+  std::reverse(std::begin(_operations), std::end(_operations));
+}
+
+void Linear::accept(::internal::tflite::op::NodeVisitor &&visitor) const
+{
+  for (const auto op : _operations)
+  {
+    op->accept(std::move(visitor));
+  }
+}
+
+void Linear::markTensors(neurun::codegen::BackendResolver &resolver) const
+{
+  for (const auto op : _operations)
+  {
+    auto tensor_builder = resolver.getTensorBuilder(typeid(*op));
+    op->accept(neurun::codegen::TensorMarker{*tensor_builder});
+  }
+}
+
+} // namespace linear
+} // namespace neurun
diff --git a/runtimes/neurun/src/linear/Linear.h b/runtimes/neurun/src/linear/Linear.h
new file mode 100644 (file)
index 0000000..863101f
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef __NEURUN_LINEAR_LINEAR_H__
+#define __NEURUN_LINEAR_LINEAR_H__
+
+#include <vector>
+
+#include "internal/op/Node.h"
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+struct NodeVisitor;
+} // namespace op
+} // namespace tflite
+} // namespace internal
+
+namespace neurun
+{
+namespace graph
+{
+class Graph;
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace codegen
+{
+class BackendResolver;
+} // namespace codegen
+} // namespace neurun
+
+namespace neurun
+{
+namespace linear
+{
+
+class Linear : ::internal::tflite::op::Node
+{
+public:
+  Linear(const graph::Graph &graph);
+
+public:
+  Linear(const Linear &linear) = delete;
+
+public:
+  virtual void accept(::internal::tflite::op::NodeVisitor &&) const;
+
+  // TODO Remove this since tensor marking will be replaced with another way
+  virtual void markTensors(neurun::codegen::BackendResolver &) const;
+
+public:
+private:
+  std::vector<const ::internal::tflite::op::Node *> _operations;
+};
+
+} // namespace linear
+} // namespace neurun
+
+#endif // __NEURUN_LINEAR_LINEAR_H__