file(GLOB SOURCES_FRONTEND "src/frontend/*.cc")
file(GLOB_RECURSE SOURCES_INTERNAL "src/internal/*.cc")
file(GLOB_RECURSE SOURCES_GRAPH "src/graph/*.cc")
+# NOTE(review): SOURCES_FRONTEND uses plain GLOB (non-recursive) while every
+# other source group uses GLOB_RECURSE — confirm this asymmetry is intentional.
+file(GLOB_RECURSE SOURCES_LINEAR "src/linear/*.cc")
file(GLOB_RECURSE SOURCES_CODEGEN "src/codegen/*.cc")
file(GLOB_RECURSE SOURCES_VERIFIER "src/verifier/*.cc")
-set(SOURCES ${SOURCES} ${SOURCES_FRONTEND} ${SOURCES_INTERNAL} ${SOURCES_GRAPH} ${SOURCES_CODEGEN} ${SOURCES_VERIFIER})
+set(SOURCES ${SOURCES} ${SOURCES_FRONTEND} ${SOURCES_INTERNAL} ${SOURCES_GRAPH} ${SOURCES_LINEAR} ${SOURCES_CODEGEN} ${SOURCES_VERIFIER})
# NOTE For now ARMCompute is necessary
# TODO Remove required package below(should be optional)
#include "codegen/TensorMarker.h"
#include "codegen/PlanBuilder.h"
+#include "linear/Linear.h"
+
int ANeuralNetworksCompilation::finish()
{
arm_compute::CLScheduler::get().default_init();
const auto &operands = plan.model().operands();
plan.model().lower();
-
- // Get linearized ops
- std::vector<const ::internal::tflite::op::Node *> operations;
- {
- plan.model().iteratePostDfs([&](const neurun::graph::operation::Node &node) {
- auto op = node.op();
- operations.emplace_back(op);
- // dynamic_cast<const ::internal::tflite::op::Conv2D::implicit::Node*>(op)
- });
-
- std::reverse(std::begin(operations), std::end(operations));
- }
+ // linearize() asserts the graph is LOWERED and moves it to LINEARIZED, so
+ // every later per-operation step must go through `linear`, not the graph.
+ auto linear = plan.model().linearize();
// Dump ops
- for (const auto op : operations)
- {
- op->accept(neurun::codegen::Dumper{});
- }
+ linear->accept(neurun::codegen::Dumper{});
::internal::BackendManager backend_manager{plan};
neurun::codegen::BackendResolver backend_resolver{backend_manager};
neurun::codegen::PlanBuilder plan_builder{plan};
- for (uint32_t n = 0; n < operations.size(); ++n)
- {
- const auto &op = *operations.at(n);
- auto tensor_builder = backend_resolver.getTensorBuilder(typeid(op));
- op.accept(neurun::codegen::TensorMarker{*tensor_builder});
- }
+ // Tensor marking now lives inside Linear (see Linear::markTensors)
+ linear->markTensors(backend_resolver);
#if 0 // Tensor Conversion disabled
auto tensor_builders = backend_resolver.getAllTensorBuilders();
}
#endif
- for (uint32_t n = 0; n < operations.size(); ++n)
- {
- const auto &op = *operations.at(n);
- op.accept(neurun::codegen::Planner{operands, plan_builder, backend_resolver});
- }
+ // Plan each operation in topological order via the Linear wrapper
+ linear->accept(neurun::codegen::Planner{operands, plan_builder, backend_resolver});
// TODO Add optimization passes
plan_builder.finalize(backend_resolver);
#include "logging.h"
#include "verifier/IVerifier.h"
+#include "nnfw/std/memory.h"
+#include "linear/Linear.h"
namespace neurun
{
assert(std::all_of(visited.begin(), visited.end(), [](bool v) { return v; }));
}
+/**
+ * @brief Linearize the lowered graph into a topologically-ordered Linear object
+ *
+ * Must be called after lower() (asserts Phase::LOWERED).  On return the graph
+ * enters Phase::LINEARIZED and is no longer effective on its own.
+ *
+ * @return Owning pointer to the newly built Linear object
+ */
+std::unique_ptr<linear::Linear> Graph::linearize(void)
+{
+  assert(_phase == Phase::LOWERED);
+
+  auto linear = nnfw::make_unique<linear::Linear>(*this);
+
+  // TODO Move the operations and operands to linear object
+
+  _phase = Phase::LINEARIZED;
+
+  // Return the local by value: copy elision / implicit move applies, while
+  // `return std::move(linear)` would be a pessimizing move blocking NRVO.
+  return linear;
+}
+
} // namespace graph
} // namespace neurun
namespace neurun
{
+namespace linear
+{
+class Linear;
+} // namespace linear
+} // namespace neurun
+
+namespace neurun
+{
namespace graph
{
{
BUILDING,
MODEL,
- LOWERED
+ LOWERED,
+ LINEARIZED // Everything is moved to Linear object so this Graph object is no longer effective
};
public:
void addOutput(const operand::Index &ind);
void finishBuilding(void);
void lower(void);
+ std::unique_ptr<linear::Linear> linearize(void);
bool isBuildingPhase(void) { return _phase == Phase::BUILDING; }
// Accessors
--- /dev/null
+#include "Linear.h"
+
+#include "graph/Graph.h"
+#include "internal/op/Node.h"
+
+#include "codegen/TensorMarker.h"
+#include "codegen/BackendResolver.h"
+
+namespace neurun
+{
+namespace linear
+{
+
+/**
+ * @brief Build the linearized operation list from a graph
+ * @param graph Graph to linearize; iterated (post-order DFS) but not modified
+ */
+Linear::Linear(const graph::Graph &graph)
+{
+  // Linearize with topological sort
+  //
+  // Topological sort algorithm
+  // 1. Iterate with DFS
+  // 2. Append the node to vector when DFS for the node finishes(post order)
+  // 3. Reverse the order of nodes
+
+  graph.iteratePostDfs([&](const neurun::graph::operation::Node &node) {
+    auto op = node.op();
+    _operations.emplace_back(op);
+  });
+
+  std::reverse(std::begin(_operations), std::end(_operations));
+}
+
+/**
+ * @brief Apply the given visitor to every operation in linearized order
+ *
+ * NOTE(review): the same visitor object is rvalue-cast on every iteration;
+ * this is only safe if Node::accept never actually moves from its argument
+ * (the previous call sites constructed a fresh visitor per op) — confirm
+ * against the NodeVisitor contract.
+ */
+void Linear::accept(::internal::tflite::op::NodeVisitor &&visitor) const
+{
+  for (const auto op : _operations)
+  {
+    op->accept(std::move(visitor));
+  }
+}
+
+/**
+ * @brief Mark every operation's tensors through its backend's TensorBuilder
+ * @param resolver Resolves an operation's dynamic type to a TensorBuilder
+ */
+void Linear::markTensors(neurun::codegen::BackendResolver &resolver) const
+{
+  for (const auto op : _operations)
+  {
+    // typeid(*op) yields the dynamic (most-derived) node type for the lookup
+    auto tensor_builder = resolver.getTensorBuilder(typeid(*op));
+    op->accept(neurun::codegen::TensorMarker{*tensor_builder});
+  }
+}
+
+} // namespace linear
+} // namespace neurun
--- /dev/null
+#ifndef __NEURUN_LINEAR_LINEAR_H__
+#define __NEURUN_LINEAR_LINEAR_H__
+
+#include <vector>
+
+#include "internal/op/Node.h"
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+struct NodeVisitor;
+} // namespace op
+} // namespace tflite
+} // namespace internal
+
+namespace neurun
+{
+namespace graph
+{
+class Graph;
+} // namespace graph
+} // namespace neurun
+
+namespace neurun
+{
+namespace codegen
+{
+class BackendResolver;
+} // namespace codegen
+} // namespace neurun
+
+namespace neurun
+{
+namespace linear
+{
+
+// Holds a graph lowered into a linear (topologically sorted) operation list.
+// NOTE(review): inherits ::internal::tflite::op::Node with class-default
+// (private) access and without `override` on accept() — confirm whether
+// public inheritance / override was intended once Node's interface is visible.
+class Linear : ::internal::tflite::op::Node
+{
+public:
+  // Builds the linearized operation list from the given (lowered) graph
+  Linear(const graph::Graph &graph);
+
+public:
+  // Non-copyable: holds non-owning node pointers tied to one graph.
+  // Delete copy assignment together with the copy constructor so the class
+  // is consistently non-copyable (Rule of Five).
+  Linear(const Linear &linear) = delete;
+  Linear &operator=(const Linear &linear) = delete;
+
+public:
+  // Applies the visitor to every operation in linear order
+  virtual void accept(::internal::tflite::op::NodeVisitor &&) const;
+
+  // TODO Remove this since tensor marking will be replaced with another way
+  virtual void markTensors(neurun::codegen::BackendResolver &) const;
+
+private:
+  // Non-owning pointers; the nodes are owned by the source graph/model
+  std::vector<const ::internal::tflite::op::Node *> _operations;
+};
+
+} // namespace linear
+} // namespace neurun
+
+#endif // __NEURUN_LINEAR_LINEAR_H__