--- /dev/null
+#include "Planner.h"
+
+#include <typeinfo>
+
+#include "internal/Convert.h"
+#include "internal/Model.h"
+#include "graph/operand/Set.h"
+#include "codegen/IPlanBuilder.h"
+#include "codegen/BackendResolver.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+void Planner::visit(const ::internal::tflite::op::Conv2D::implicit::Node &node)
+{
+ // Plan an implicit-padding Conv2D node: register a shape constraint for
+ // every operand, then delegate initializer and stage generation to the
+ // backend resolved for this node type.
+ const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+
+ const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
+ const ::internal::tflite::operand::Index ker_index{node.param().ker_index};
+ const ::internal::tflite::operand::Index bias_index{node.param().bias_index};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+ const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
+ const auto bias_size = _ctx.at(bias_index).shape().asVector();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+ _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+ _builder.addShapeConstr(ker_index, ::internal::asTensorInfo(ker_shape));
+ _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
+
+ // Generate Initializers
+ // NOTE The backend is looked up per node *type* (typeid), not per instance.
+ auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
+ _builder.addInitializer(ker_index, init_gen->generateWeight(node));
+ _builder.addInitializer(bias_index, init_gen->generateBias(node));
+
+ // Generate Stage
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node)
+{
+ // Plan an implicit-padding MaxPool2D node: constrain input/output feature
+ // shapes and delegate stage generation to the resolved backend.
+ const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+ const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+ _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+
+ // Generate Stage
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node)
+{
+ // Plan an implicit-padding AvgPool2D node: identical planning flow to
+ // MaxPool2D — constrain feature shapes, then generate the backend stage.
+ const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+ const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+ _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+
+ // Generate Stage
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::Concat::Node &node)
+{
+ // Plan a Concat node: register shape constraints for the output and every
+ // input feature map, then delegate stage generation to the resolved backend.
+ const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+
+ // NOTE This implementation assumes that inputs and output are a feature
+ // TODO Remove this assumption
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+
+ // NOTE This implementation assumes concat over feature depth
+ // TODO Remove this assumption
+ assert(_ctx.at(::internal::tflite::operand::Index{node.param().axis_index}).asScalar<int32_t>() ==
+ 3);
+
+ // Set Shape Constraints (for output)
+ _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+
+ // Set Shape Constraints (for input)
+ for (const auto &index : node.param().ifm_indexes)
+ {
+ const ::internal::tflite::operand::Index ifm_index{index};
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+ _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+ }
+
+ // Generate Stage
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
+{
+ // Plan a FullyConnected node: validate operand ranks, constrain shapes,
+ // then delegate initializer and stage generation to the resolved backend.
+ VERBOSE(FullyConnected) << "Configure FULLY_CONNECTED operation" << std::endl;
+
+ const ::internal::tflite::operand::Index output_index{node.param().output_index};
+
+ const ::internal::tflite::operand::Index input_index{node.param().input_index};
+ const ::internal::tflite::operand::Index weight_index{node.param().weight_index};
+ const ::internal::tflite::operand::Index bias_index{node.param().bias_index};
+
+ // NOTE(review): activation_index is materialized but not consumed in this
+ // function — presumably the backend stage generator reads it from the node;
+ // confirm before removing.
+ const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
+
+ assert(_ctx.at(output_index).shape().rank() == 2);
+ const auto output_size = _ctx.at(output_index).shape().dim(1);
+
+ // NOTE We assume that input is a feature map
+ // TODO Remove this restriction!
+ const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
+
+ assert(_ctx.at(weight_index).shape().rank() == 2);
+ const auto num_output = _ctx.at(weight_index).shape().dim(0);
+ const auto input_size = _ctx.at(weight_index).shape().dim(1);
+ // Flattened input feature map must line up with the weight matrix width
+ assert(ifm_shape.C * ifm_shape.H * ifm_shape.W == input_size);
+
+ const auto bias_size = _ctx.at(bias_index).shape().asVector();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(output_index, ::internal::asTensorInfo(output_size));
+ _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
+ _builder.addShapeConstr(weight_index,
+ ::internal::asTensorInfo(num_output /*H*/, input_size /*W*/));
+ _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
+
+ // Generate Initializers
+ auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
+ _builder.addInitializer(weight_index, init_gen->generateWeight(node));
+ _builder.addInitializer(bias_index, init_gen->generateBias(node));
+
+ // Generate Stage
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
+{
+ // Plan a Reshape node — currently restricted to a 4-D feature map being
+ // flattened to a rank-2 (1 x N) vector; the asserts below enforce that.
+ const ::internal::tflite::operand::Index output_index{node.param().output_index};
+ const ::internal::tflite::operand::Index input_index{node.param().input_index};
+
+ // NOTE The content of a tensor specified by shape_index should be aligned with
+ // output tensor shape
+ // TODO Check consistency of output shape
+
+ // 'Feature Map' to 'Vector' reshape
+ assert(_ctx.at(input_index).shape().rank() == 4);
+ assert(_ctx.at(output_index).shape().rank() == 2);
+ assert(_ctx.at(output_index).shape().dim(0) == 1);
+
+ const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
+ const auto out_size = _ctx.at(output_index).shape().dim(1);
+
+ // NOTE Vector element ordering issue arises when H or W is not 1
+ assert(ifm_shape.H == 1);
+ assert(ifm_shape.W == 1);
+ assert((ifm_shape.C * ifm_shape.H * ifm_shape.W) == out_size);
+
+ _builder.addShapeConstr(output_index, ::internal::asTensorInfo(out_size));
+ _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
+
+ // Generate Stage
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
+{
+ // Plan a Softmax node — currently restricted to rank-2 (1 x N) tensors with
+ // matching input/output shapes; constrain both to vectors of length N.
+ VERBOSE(Softmax) << "Configure SOFTMAX operation" << std::endl;
+
+ const ::internal::tflite::operand::Index output_index{node.param().output_index};
+ const ::internal::tflite::operand::Index input_index{node.param().input_index};
+
+ assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
+
+ // TODO Support 'feature map' input
+ assert(_ctx.at(input_index).shape().rank() == 2);
+ assert(_ctx.at(input_index).shape().dim(0) == 1);
+ assert(_ctx.at(input_index).shape().dim(0) == _ctx.at(output_index).shape().dim(0));
+ assert(_ctx.at(input_index).shape().dim(1) == _ctx.at(output_index).shape().dim(1));
+
+ const uint32_t len = _ctx.at(output_index).shape().dim(1);
+
+ _builder.addShapeConstr(output_index, ::internal::asTensorInfo(len));
+ _builder.addShapeConstr(input_index, ::internal::asTensorInfo(len));
+
+ // Generate Stage
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node)
+{
+ // TensorConvert nodes add no shape constraints here; only a backend stage.
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node)
+{
+ // TensorConvert nodes add no shape constraints here; only a backend stage.
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node)
+{
+ // TensorConvert nodes add no shape constraints here; only a backend stage.
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+void Planner::visit(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node)
+{
+ // TensorConvert nodes add no shape constraints here; only a backend stage.
+ auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ _builder.addStage(stage_gen->generate(node));
+}
+
+} // namespace codegen
+} // namespace neurun
#include "codegen/IPlanBuilder.h"
#include "codegen/BackendResolver.h"
-
-class Planner : public ::internal::tflite::op::NodeVisitor
-{
-public:
- Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder,
- neurun::codegen::BackendResolver &backend_resolver)
- : _ctx{ctx}, _builder{builder}, _backend_resolver(backend_resolver)
- {
- }
-
-public:
- void visit(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
- void visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
- void visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) override;
- void visit(const ::internal::tflite::op::Concat::Node &node) override;
- void visit(const ::internal::tflite::op::FullyConnected::Node &node) override;
- void visit(const ::internal::tflite::op::Reshape::Node &node) override;
- void visit(const ::internal::tflite::op::Softmax::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override;
-
-private:
- const neurun::graph::operand::Set &_ctx;
- neurun::codegen::IPlanBuilder &_builder;
- neurun::codegen::BackendResolver &_backend_resolver;
-};
-
-void Planner::visit(const ::internal::tflite::op::Conv2D::implicit::Node &node)
-{
- const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
-
- const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
- const ::internal::tflite::operand::Index ker_index{node.param().ker_index};
- const ::internal::tflite::operand::Index bias_index{node.param().bias_index};
-
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
- const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
- const auto bias_size = _ctx.at(bias_index).shape().asVector();
-
- // Set Shape Constraints
- _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
- _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
- _builder.addShapeConstr(ker_index, ::internal::asTensorInfo(ker_shape));
- _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
-
- // Generate Initializers
- auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
- _builder.addInitializer(ker_index, init_gen->generateWeight(node));
- _builder.addInitializer(bias_index, init_gen->generateBias(node));
-
- // Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node)
-{
- const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
- const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
-
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
-
- // Set Shape Constraints
- _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
- _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
-
- // Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node)
-{
- const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
- const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
-
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
-
- // Set Shape Constraints
- _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
- _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
-
- // Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::Concat::Node &node)
-{
- const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
-
- // NOTE This implementation assumes that inputs and output are a feature
- // TODO Remove this assumption
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
-
- // NOTE This implementation assumes concat over feature depth
- // TODO Remove this assumption
- assert(_ctx.at(::internal::tflite::operand::Index{node.param().axis_index}).asScalar<int32_t>() ==
- 3);
-
- // Set Shape Constraints (for output)
- _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
-
- // Set Shape Constraints (for input)
- uint32_t depth = 0;
-
- for (const auto &index : node.param().ifm_indexes)
- {
- const ::internal::tflite::operand::Index ifm_index{index};
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
- _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
- }
-
- // Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
-{
- VERBOSE(FullyConnected) << "Configure FULLY_CONNECTED operation" << std::endl;
-
- const ::internal::tflite::operand::Index output_index{node.param().output_index};
-
- const ::internal::tflite::operand::Index input_index{node.param().input_index};
- const ::internal::tflite::operand::Index weight_index{node.param().weight_index};
- const ::internal::tflite::operand::Index bias_index{node.param().bias_index};
-
- const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
-
- assert(_ctx.at(output_index).shape().rank() == 2);
- const auto output_size = _ctx.at(output_index).shape().dim(1);
-
- // NOTE We assume that input is a feature map
- // TODO Remove this restriction!
- const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
-
- assert(_ctx.at(weight_index).shape().rank() == 2);
- const auto num_output = _ctx.at(weight_index).shape().dim(0);
- const auto input_size = _ctx.at(weight_index).shape().dim(1);
- assert(ifm_shape.C * ifm_shape.H * ifm_shape.W == input_size);
-
- const auto bias_size = _ctx.at(bias_index).shape().asVector();
-
- // Set Shape Constraints
- _builder.addShapeConstr(output_index, ::internal::asTensorInfo(output_size));
- _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
- _builder.addShapeConstr(weight_index,
- ::internal::asTensorInfo(num_output /*H*/, input_size /*W*/));
- _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
-
- // Generate Initializers
- auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
- _builder.addInitializer(weight_index, init_gen->generateWeight(node));
- _builder.addInitializer(bias_index, init_gen->generateBias(node));
-
- // Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
-{
- const ::internal::tflite::operand::Index output_index{node.param().output_index};
- const ::internal::tflite::operand::Index input_index{node.param().input_index};
-
- // NOTE The content of a tensor specified by shape_index should be aligned with
- // output tensor shape
- // TODO Check consistency of ouput shape
-
- // 'Feature Map' to 'Vector' reshape
- assert(_ctx.at(input_index).shape().rank() == 4);
- assert(_ctx.at(output_index).shape().rank() == 2);
- assert(_ctx.at(output_index).shape().dim(0) == 1);
-
- const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
- const auto out_size = _ctx.at(output_index).shape().dim(1);
-
- // NOTE Vector element ordering issue arises when H or W is not 1
- assert(ifm_shape.H == 1);
- assert(ifm_shape.W == 1);
- assert((ifm_shape.C * ifm_shape.H * ifm_shape.W) == out_size);
-
- _builder.addShapeConstr(output_index, ::internal::asTensorInfo(out_size));
- _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
-
- // Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
-{
- VERBOSE(Softmax) << "Configure SOFTMAX operation" << std::endl;
-
- const ::internal::tflite::operand::Index output_index{node.param().output_index};
- const ::internal::tflite::operand::Index input_index{node.param().input_index};
-
- assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
-
- // TODO Support 'feature map' input
- assert(_ctx.at(input_index).shape().rank() == 2);
- assert(_ctx.at(input_index).shape().dim(0) == 1);
- assert(_ctx.at(input_index).shape().dim(0) == _ctx.at(output_index).shape().dim(0));
- assert(_ctx.at(input_index).shape().dim(1) == _ctx.at(output_index).shape().dim(1));
-
- const uint32_t len = _ctx.at(output_index).shape().dim(1);
-
- _builder.addShapeConstr(output_index, ::internal::asTensorInfo(len));
- _builder.addShapeConstr(input_index, ::internal::asTensorInfo(len));
-
- // Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node)
-{
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node)
-{
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node)
-{
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node)
-{
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
+#include "codegen/Planner.h"
class TensorMarker : public ::internal::tflite::op::NodeVisitor
{
for (uint32_t n = 0; n < operations.size(); ++n)
{
const auto &op = *operations.at(n);
- op.accept(Planner{operands, plan_builder, backend_resolver});
+ op.accept(neurun::codegen::Planner{operands, plan_builder, backend_resolver});
}
// TODO Add optimization passes