Merge Old Node into New Node.
Old Node : `internal::tflite::op::Node`
New Node : `neurun::graph::operation::Node`
- struct `Param` for each Node holds non-tensor parameters only
- Tensor parameters are stored in `_inputs` and `_outputs`
- Provide a default implementation of get/setters for inputs and
outputs of `Node`
- `NodeVisitors` are now for New Node, not Old Node
- class `Linear` now holds New Node rather than Old Node
This commit completes the draft commit #2479, with #2483 and #2486
which were already merged.
Resolve #2404
Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
#include "arm_compute/core/ITensor.h"
-#include "internal/op/Conv2D.h"
-#include "internal/op/FullyConnected.h"
+#include "graph/operation/Conv2D.h"
+#include "graph/operation/FullyConnected.h"
using Initializer = std::function<void(::arm_compute::ITensor &)>;
{
virtual ~IInitializerGenerator() = default;
- virtual Initializer
- generateWeight(const ::internal::tflite::op::Conv2D::implicit::Node &node) = 0;
- virtual Initializer generateWeight(const ::internal::tflite::op::FullyConnected::Node &node) = 0;
+ virtual Initializer generateWeight(const graph::operation::Conv2D::Implicit::Node &node) = 0;
+ virtual Initializer generateWeight(const graph::operation::FullyConnected::Node &node) = 0;
- virtual Initializer generateBias(const ::internal::tflite::op::Conv2D::implicit::Node &node) = 0;
- virtual Initializer generateBias(const ::internal::tflite::op::FullyConnected::Node &node) = 0;
+ virtual Initializer generateBias(const graph::operation::Conv2D::Implicit::Node &node) = 0;
+ virtual Initializer generateBias(const graph::operation::FullyConnected::Node &node) = 0;
};
} // namespace backend
#include <arm_compute/runtime/IFunction.h>
#include "backend/ITensorBuilder.h"
-#include "internal/op/Conv2D.h"
-#include "internal/op/MaxPool2D.h"
-#include "internal/op/AvgPool2D.h"
-#include "internal/op/Concat.h"
-#include "internal/op/FullyConnected.h"
-#include "internal/op/Reshape.h"
-#include "internal/op/Softmax.h"
-#include "internal/op/NOP.h"
+#include "graph/operation/Conv2D.h"
+#include "graph/operation/MaxPool2D.h"
+#include "graph/operation/AvgPool2D.h"
+#include "graph/operation/Concat.h"
+#include "graph/operation/FullyConnected.h"
+#include "graph/operation/Reshape.h"
+#include "graph/operation/Softmax.h"
+#include "graph/operation/NOP.h"
struct IExecutionBuilder
{
virtual std::shared_ptr<ITensorBuilder> tensor_builder() = 0;
- virtual Stage generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) = 0;
- virtual Stage generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) = 0;
- virtual Stage generate(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) = 0;
- virtual Stage generate(const ::internal::tflite::op::Concat::Node &node) = 0;
- virtual Stage generate(const ::internal::tflite::op::FullyConnected::Node &node) = 0;
- virtual Stage generate(const ::internal::tflite::op::Reshape::Node &node) = 0;
- virtual Stage generate(const ::internal::tflite::op::Softmax::Node &node) = 0;
- virtual Stage generate(const ::internal::tflite::op::NOP::Node &node) = 0;
+ virtual Stage generate(const graph::operation::Conv2D::Implicit::Node &node) = 0;
+ virtual Stage generate(const graph::operation::MaxPool2D::Implicit::Node &node) = 0;
+ virtual Stage generate(const graph::operation::AvgPool2D::Implicit::Node &node) = 0;
+ virtual Stage generate(const graph::operation::Concat::Node &node) = 0;
+ virtual Stage generate(const graph::operation::FullyConnected::Node &node) = 0;
+ virtual Stage generate(const graph::operation::Reshape::Node &node) = 0;
+ virtual Stage generate(const graph::operation::Softmax::Node &node) = 0;
+ virtual Stage generate(const graph::operation::NOP::Node &node) = 0;
};
} // namespace backend
}
Initializer
-InitializerGenerator::generateWeight(const ::internal::tflite::op::Conv2D::implicit::Node &node)
+InitializerGenerator::generateWeight(const graph::operation::Conv2D::Implicit::Node &node)
{
- const ::neurun::graph::operand::Index ker_index{node.param().ker_index};
+ const ::neurun::graph::operand::Index ker_index{node.getInputs().at(1)};
const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
auto ker_base = _ctx.at(ker_index).data().base();
};
}
-Initializer
-InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnected::Node &node)
+Initializer InitializerGenerator::generateWeight(const graph::operation::FullyConnected::Node &node)
{
- const ::neurun::graph::operand::Index weight_index{node.param().weight_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
const auto num_output = _ctx.at(weight_index).shape().dim(0);
auto weight_base = _ctx.at(weight_index).data().base();
};
}
-Initializer
-InitializerGenerator::generateBias(const ::internal::tflite::op::Conv2D::implicit::Node &node)
+Initializer InitializerGenerator::generateBias(const graph::operation::Conv2D::Implicit::Node &node)
{
// TODO Refactor so we can reuse the common code
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
auto bias_base = _ctx.at(bias_index).data().base();
const auto bias_size = _ctx.at(bias_index).shape().asVector();
};
}
-Initializer
-InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected::Node &node)
+Initializer InitializerGenerator::generateBias(const graph::operation::FullyConnected::Node &node)
{
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
auto bias_base = _ctx.at(bias_index).data().base();
const auto bias_size = _ctx.at(bias_index).shape().asVector();
public:
InitializerGenerator(const neurun::graph::operand::Set &ctx);
- Initializer generateWeight(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
- Initializer generateWeight(const ::internal::tflite::op::FullyConnected::Node &node) override;
+ Initializer generateWeight(const graph::operation::Conv2D::Implicit::Node &node) override;
+ Initializer generateWeight(const graph::operation::FullyConnected::Node &node) override;
- Initializer generateBias(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
- Initializer generateBias(const ::internal::tflite::op::FullyConnected::Node &node) override;
+ Initializer generateBias(const graph::operation::Conv2D::Implicit::Node &node) override;
+ Initializer generateBias(const graph::operation::FullyConnected::Node &node) override;
private:
const neurun::graph::operand::Set &_ctx;
// DO NOTHING
}
-Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::Node &node)
+Stage StageGenerator::generate(const graph::operation::Conv2D::Implicit::Node &node)
{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
- const ::neurun::graph::operand::Index ker_index{node.param().ker_index};
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index ker_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index};
const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index};
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node)
+Stage StageGenerator::generate(const graph::operation::MaxPool2D::Implicit::Node &node)
{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
const ::neurun::graph::operand::Index kh_index{node.param().kh_index};
const ::neurun::graph::operand::Index kw_index{node.param().kw_index};
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit::Node &node)
+Stage StageGenerator::generate(const graph::operation::AvgPool2D::Implicit::Node &node)
{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
const ::neurun::graph::operand::Index kh_index{node.param().kh_index};
const ::neurun::graph::operand::Index kw_index{node.param().kw_index};
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::Concat::Node &node)
+Stage StageGenerator::generate(const graph::operation::Concat::Node &node)
{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
const ::neurun::graph::operand::Index axis_index{node.param().axis_index};
struct Param
Param param;
- param.output_index = node.param().ofm_index;
- param.input_indexes = node.param().ifm_indexes;
+ param.output_index = ofm_index.asInt();
+ for (const auto &e : node.getInputs().list())
+ {
+ param.input_indexes.emplace_back(e.asInt());
+ }
param.axis = _ctx.at(axis_index).asScalar<int32_t>();
auto tensors = _tensor_builder;
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::FullyConnected::Node &node)
+Stage StageGenerator::generate(const graph::operation::FullyConnected::Node &node)
{
- const ::neurun::graph::operand::Index output_index{node.param().output_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
- const ::neurun::graph::operand::Index weight_index{node.param().weight_index};
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
// Construct operation parameters
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::Reshape::Node &node)
+Stage StageGenerator::generate(const graph::operation::Reshape::Node &node)
{
- const ::neurun::graph::operand::Index output_index{node.param().output_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
struct Param
{
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::Softmax::Node &node)
+Stage StageGenerator::generate(const graph::operation::Softmax::Node &node)
{
- const ::neurun::graph::operand::Index output_index{node.param().output_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
const ::neurun::graph::operand::Index scale_index{node.param().scale_index};
assert(_ctx.at(scale_index).shape().rank() == 0);
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::NOP::Node &node)
+Stage StageGenerator::generate(const graph::operation::NOP::Node &node)
{
// DO NOTHING
}
virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
- virtual Stage generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::Concat::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::FullyConnected::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::Reshape::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::Softmax::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::NOP::Node &node) override;
+ virtual Stage generate(const graph::operation::Conv2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::MaxPool2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::AvgPool2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::Concat::Node &node) override;
+ virtual Stage generate(const graph::operation::FullyConnected::Node &node) override;
+ virtual Stage generate(const graph::operation::Reshape::Node &node) override;
+ virtual Stage generate(const graph::operation::Softmax::Node &node) override;
+ virtual Stage generate(const graph::operation::NOP::Node &node) override;
private:
const neurun::graph::operand::Set &_ctx;
}
Initializer
-InitializerGenerator::generateWeight(const ::internal::tflite::op::Conv2D::implicit::Node &node)
+InitializerGenerator::generateWeight(const graph::operation::Conv2D::Implicit::Node &node)
{
- const ::neurun::graph::operand::Index ker_index{node.param().ker_index};
+ const ::neurun::graph::operand::Index ker_index{node.getInputs().at(1)};
const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
auto ker_base = _ctx.at(ker_index).data().base();
};
}
-Initializer
-InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnected::Node &node)
+Initializer InitializerGenerator::generateWeight(const graph::operation::FullyConnected::Node &node)
{
- const ::neurun::graph::operand::Index weight_index{node.param().weight_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
const auto num_output = _ctx.at(weight_index).shape().dim(0);
auto weight_base = _ctx.at(weight_index).data().base();
}
}
-Initializer
-InitializerGenerator::generateBias(const ::internal::tflite::op::Conv2D::implicit::Node &node)
+Initializer InitializerGenerator::generateBias(const graph::operation::Conv2D::Implicit::Node &node)
{
// TODO Refactor so we can reuse the common code
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
auto bias_base = _ctx.at(bias_index).data().base();
const auto bias_size = _ctx.at(bias_index).shape().asVector();
};
}
-Initializer
-InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected::Node &node)
+Initializer InitializerGenerator::generateBias(const graph::operation::FullyConnected::Node &node)
{
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
auto bias_base = _ctx.at(bias_index).data().base();
auto bias_type = _ctx.at(bias_index).typeInfo().type();
public:
InitializerGenerator(const neurun::graph::operand::Set &ctx);
- Initializer generateWeight(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
- Initializer generateWeight(const ::internal::tflite::op::FullyConnected::Node &node) override;
+ Initializer generateWeight(const graph::operation::Conv2D::Implicit::Node &node) override;
+ Initializer generateWeight(const graph::operation::FullyConnected::Node &node) override;
- Initializer generateBias(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
- Initializer generateBias(const ::internal::tflite::op::FullyConnected::Node &node) override;
+ Initializer generateBias(const graph::operation::Conv2D::Implicit::Node &node) override;
+ Initializer generateBias(const graph::operation::FullyConnected::Node &node) override;
private:
const neurun::graph::operand::Set &_ctx;
// DO NOTHING
}
-Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::Node &node)
+Stage StageGenerator::generate(const graph::operation::Conv2D::Implicit::Node &node)
{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
- const ::neurun::graph::operand::Index ker_index{node.param().ker_index};
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index ker_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
const ::neurun::graph::operand::Index vstride_index{node.param().vstride_index};
const ::neurun::graph::operand::Index hstride_index{node.param().hstride_index};
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node)
+Stage StageGenerator::generate(const graph::operation::MaxPool2D::Implicit::Node &node)
{
VERBOSE(MaxPool2D) << "generate CPU MaxPool2D" << std::endl;
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
const ::neurun::graph::operand::Index kh_index{node.param().kh_index};
const ::neurun::graph::operand::Index kw_index{node.param().kw_index};
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit::Node &node)
+Stage StageGenerator::generate(const graph::operation::AvgPool2D::Implicit::Node &node)
{
VERBOSE(AvgPool2D) << "generate CPU AvgPool2D" << std::endl;
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
const ::neurun::graph::operand::Index kh_index{node.param().kh_index};
const ::neurun::graph::operand::Index kw_index{node.param().kw_index};
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::Concat::Node &node)
+Stage StageGenerator::generate(const graph::operation::Concat::Node &node)
{
VERBOSE(Concat) << "generate CPU Concat" << std::endl;
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
const ::neurun::graph::operand::Index axis_index{node.param().axis_index};
struct Param
Param param;
- param.output_index = node.param().ofm_index;
- param.input_indexes = node.param().ifm_indexes;
+ param.output_index = ofm_index.asInt();
+ for (const auto &e : node.getInputs().list())
+ {
+ param.input_indexes.emplace_back(e.asInt());
+ }
param.axis = _ctx.at(axis_index).asScalar<int32_t>();
param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
- for (auto ifm_ind : node.param().ifm_indexes)
+ for (auto e : node.getInputs().list())
{
- const ::neurun::graph::operand::Index ifm_index{ifm_ind};
- param.ifm_shapes.emplace_back(::neurun::kernel::cpu::getShape(_ctx.at(ifm_index)));
+ param.ifm_shapes.emplace_back(::neurun::kernel::cpu::getShape(_ctx.at(e)));
}
auto tensors = _tensor_builder;
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::FullyConnected::Node &node)
+Stage StageGenerator::generate(const graph::operation::FullyConnected::Node &node)
{
VERBOSE(FullyConnected) << "generate CPU FullyConnected" << std::endl;
- const ::neurun::graph::operand::Index output_index{node.param().output_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
- const ::neurun::graph::operand::Index weight_index{node.param().weight_index};
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
// Construct operation parameters
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::Reshape::Node &node)
+Stage StageGenerator::generate(const graph::operation::Reshape::Node &node)
{
- const ::neurun::graph::operand::Index output_index{node.param().output_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
struct Param
{
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::Softmax::Node &node)
+Stage StageGenerator::generate(const graph::operation::Softmax::Node &node)
{
VERBOSE(Softmax) << "generate CPU Softmax" << std::endl;
- const ::neurun::graph::operand::Index output_index{node.param().output_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
const ::neurun::graph::operand::Index scale_index{node.param().scale_index};
struct Param
};
}
-Stage StageGenerator::generate(const ::internal::tflite::op::NOP::Node &node)
+Stage StageGenerator::generate(const graph::operation::NOP::Node &node)
{
// DO NOTHING
}
virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
- virtual Stage generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::Concat::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::FullyConnected::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::Reshape::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::Softmax::Node &node) override;
- virtual Stage generate(const ::internal::tflite::op::NOP::Node &node) override;
+ virtual Stage generate(const graph::operation::Conv2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::MaxPool2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::AvgPool2D::Implicit::Node &node) override;
+ virtual Stage generate(const graph::operation::Concat::Node &node) override;
+ virtual Stage generate(const graph::operation::FullyConnected::Node &node) override;
+ virtual Stage generate(const graph::operation::Reshape::Node &node) override;
+ virtual Stage generate(const graph::operation::Softmax::Node &node) override;
+ virtual Stage generate(const graph::operation::NOP::Node &node) override;
private:
const neurun::graph::operand::Set &_ctx;
if (backend_all_str.compare("none") != 0)
{
VERBOSE(BackendResolver) << "Use backend for all ops: " << backend_all_str << std::endl;
-#define OP(InternalName, NnApiName) \
- { \
- auto backend = _backend_manager.get(backend_all_str); \
- _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend; \
+#define OP(InternalName, NnApiName) \
+ { \
+ auto backend = _backend_manager.get(backend_all_str); \
+ _gen_map[typeid(graph::operation::InternalName::Node)] = backend; \
}
#include "internal/op/Op.lst"
#undef OP
::nnfw::util::EnvVar{std::string("OP_BACKEND_") + #NnApiName}.asString("acl_cl"); \
auto backend = _backend_manager.get(backend_str); \
VERBOSE(BackendResolver) << "backend for " << #NnApiName << ": " << backend_str << std::endl; \
- _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend; \
+ _gen_map[typeid(graph::operation::InternalName::Node)] = backend; \
}
#include "internal/op/Op.lst"
namespace codegen
{
-void Planner::visit(const ::internal::tflite::op::Conv2D::implicit::Node &node)
+void Planner::visit(const graph::operation::Conv2D::Implicit::Node &node)
{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
+ const auto ofm_index = node.getOutputs().at(0);
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
- const ::neurun::graph::operand::Index ker_index{node.param().ker_index};
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const auto ifm_index = node.getInputs().at(0);
+ const auto ker_index = node.getInputs().at(1);
+ const auto bias_index = node.getInputs().at(2);
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
_builder.addStage(stage_gen->generate(node));
}
-void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node)
+void Planner::visit(const graph::operation::MaxPool2D::Implicit::Node &node)
{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
_builder.addStage(stage_gen->generate(node));
}
-void Planner::visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node)
+void Planner::visit(const graph::operation::AvgPool2D::Implicit::Node &node)
{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index ifm_index{node.getInputs().at(0)};
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
_builder.addStage(stage_gen->generate(node));
}
-void Planner::visit(const ::internal::tflite::op::Concat::Node &node)
+void Planner::visit(const graph::operation::Concat::Node &node)
{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
+ const ::neurun::graph::operand::Index ofm_index{node.getOutputs().at(0)};
- // NOTE This implementation assumes that inputs and output are a feature
+ // NOTE This implementation assumes that input and output are a feature
// TODO Remove this assumption
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
// Set Shape Constraints (for input)
uint32_t depth = 0;
- for (const auto &index : node.param().ifm_indexes)
+ for (const auto &index : node.getInputs().list())
{
const ::neurun::graph::operand::Index ifm_index{index};
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
_builder.addStage(stage_gen->generate(node));
}
-void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
+void Planner::visit(const graph::operation::FullyConnected::Node &node)
{
VERBOSE(FullyConnected) << "Configure FULLY_CONNECTED operation" << std::endl;
- const ::neurun::graph::operand::Index output_index{node.param().output_index};
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
- const ::neurun::graph::operand::Index weight_index{node.param().weight_index};
- const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
+ const ::neurun::graph::operand::Index weight_index{node.getInputs().at(1)};
+ const ::neurun::graph::operand::Index bias_index{node.getInputs().at(2)};
const ::neurun::graph::operand::Index activation_index{node.param().activation_index};
_builder.addStage(stage_gen->generate(node));
}
-void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
+void Planner::visit(const graph::operation::Reshape::Node &node)
{
- const ::neurun::graph::operand::Index output_index{node.param().output_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
// NOTE The content of a tensor specified by shape_index should be aligned with
// output tensor shape
_builder.addStage(stage_gen->generate(node));
}
-void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
+void Planner::visit(const graph::operation::Softmax::Node &node)
{
VERBOSE(Softmax) << "Configure SOFTMAX operation" << std::endl;
- const ::neurun::graph::operand::Index output_index{node.param().output_index};
- const ::neurun::graph::operand::Index input_index{node.param().input_index};
+ const ::neurun::graph::operand::Index output_index{node.getOutputs().at(0)};
+ const ::neurun::graph::operand::Index input_index{node.getInputs().at(0)};
assert(_ctx.at(output_index).shape().rank() == _ctx.at(input_index).shape().rank());
_builder.addStage(stage_gen->generate(node));
}
-void Planner::visit(const ::internal::tflite::op::NOP::Node &node)
+void Planner::visit(const graph::operation::NOP::Node &node)
{
// DO NOTHING
// TODO : It's just for graph manipulation test now, it should be added tensor copy stage later.
#ifndef __NEURUN_CODEGEN_PLANNER_H__
#define __NEURUN_CODEGEN_PLANNER_H__
-#include "internal/op/NodeVisitor.h"
+#include "graph/operation/NodeVisitor.h"
namespace neurun
{
class IPlanBuilder;
class BackendResolver;
-class Planner : public ::internal::tflite::op::NodeVisitor
+class Planner : public graph::operation::NodeVisitor
{
public:
Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder,
}
public:
- void visit(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
- void visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
- void visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) override;
- void visit(const ::internal::tflite::op::Concat::Node &node) override;
- void visit(const ::internal::tflite::op::FullyConnected::Node &node) override;
- void visit(const ::internal::tflite::op::Reshape::Node &node) override;
- void visit(const ::internal::tflite::op::Softmax::Node &node) override;
- void visit(const ::internal::tflite::op::NOP::Node &node) override;
+ virtual void visit(const graph::operation::Conv2D::Implicit::Node &) override;
+ virtual void visit(const graph::operation::MaxPool2D::Implicit::Node &) override;
+ virtual void visit(const graph::operation::AvgPool2D::Implicit::Node &) override;
+ virtual void visit(const graph::operation::Concat::Node &) override;
+ virtual void visit(const graph::operation::Reshape::Node &) override;
+ virtual void visit(const graph::operation::FullyConnected::Node &) override;
+ virtual void visit(const graph::operation::Softmax::Node &) override;
+ virtual void visit(const graph::operation::NOP::Node &) override;
private:
const neurun::graph::operand::Set &_ctx;
namespace codegen
{
-void TensorMarker::visit(const ::internal::tflite::op::Conv2D::implicit::Node &node)
+void TensorMarker::visit(const graph::operation::Conv2D::Implicit::Node &node)
{
- const auto &param = node.param();
- mark(param.ofm_index);
- mark(param.ifm_index);
- mark(param.ker_index);
- mark(param.bias_index);
+ mark(node.getOutputs().at(0));
+ mark(node.getInputs().at(0));
+ mark(node.getInputs().at(1));
+ mark(node.getInputs().at(2));
}
-void TensorMarker::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node)
+void TensorMarker::visit(const graph::operation::MaxPool2D::Implicit::Node &node)
{
- const auto &param = node.param();
- mark(param.ofm_index);
- mark(param.ifm_index);
+ mark(node.getOutputs().at(0));
+ mark(node.getInputs().at(0));
}
-void TensorMarker::visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node)
+void TensorMarker::visit(const graph::operation::AvgPool2D::Implicit::Node &node)
{
- const auto &param = node.param();
- mark(param.ofm_index);
- mark(param.ifm_index);
+ mark(node.getOutputs().at(0));
+ mark(node.getInputs().at(0));
}
-void TensorMarker::visit(const ::internal::tflite::op::Concat::Node &node)
+void TensorMarker::visit(const graph::operation::Concat::Node &node)
{
- const auto &param = node.param();
- mark(param.ofm_index);
- for (auto ind : param.ifm_indexes)
+ mark(node.getOutputs().at(0));
+ for (const auto &ind : node.getInputs().list())
{
mark(ind);
}
}
-void TensorMarker::visit(const ::internal::tflite::op::FullyConnected::Node &node)
+void TensorMarker::visit(const graph::operation::FullyConnected::Node &node)
{
- const auto &param = node.param();
- mark(param.output_index);
- mark(param.input_index);
- mark(param.weight_index);
- mark(param.bias_index);
+ mark(node.getOutputs().at(0));
+ mark(node.getInputs().at(0));
+ mark(node.getInputs().at(1));
+ mark(node.getInputs().at(2));
}
-void TensorMarker::visit(const ::internal::tflite::op::Reshape::Node &node)
+void TensorMarker::visit(const graph::operation::Reshape::Node &node)
{
- const auto &param = node.param();
- mark(param.output_index);
- mark(param.input_index);
+ mark(node.getOutputs().at(0));
+ mark(node.getInputs().at(0));
}
-void TensorMarker::visit(const ::internal::tflite::op::Softmax::Node &node)
+void TensorMarker::visit(const graph::operation::Softmax::Node &node)
{
- const auto &param = node.param();
- mark(param.output_index);
- mark(param.input_index);
+ mark(node.getOutputs().at(0));
+ mark(node.getInputs().at(0));
}
-void TensorMarker::visit(const ::internal::tflite::op::NOP::Node &node)
+void TensorMarker::visit(const graph::operation::NOP::Node &node)
{
// DO NOTHING
}
#ifndef __NEURUN_CODEGEN_TENSOR_MARKER_H__
#define __NEURUN_CODEGEN_TENSOR_MARKER_H__
-#include "internal/op/NodeVisitor.h"
+#include "graph/operation/NodeVisitor.h"
#include "backend/ITensorBuilder.h"
namespace neurun
namespace codegen
{
-class TensorMarker : public ::internal::tflite::op::NodeVisitor
+class TensorMarker : public graph::operation::NodeVisitor
{
public:
TensorMarker(neurun::backend::ITensorBuilder &tensor_builder) : _tensor_builder{tensor_builder}
}
public:
- void visit(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
- void visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
- void visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) override;
- void visit(const ::internal::tflite::op::Concat::Node &node) override;
- void visit(const ::internal::tflite::op::FullyConnected::Node &node) override;
- void visit(const ::internal::tflite::op::Reshape::Node &node) override;
- void visit(const ::internal::tflite::op::Softmax::Node &node) override;
- void visit(const ::internal::tflite::op::NOP::Node &node) override;
+ virtual void visit(const graph::operation::Conv2D::Implicit::Node &) override;
+ virtual void visit(const graph::operation::MaxPool2D::Implicit::Node &) override;
+ virtual void visit(const graph::operation::AvgPool2D::Implicit::Node &) override;
+ virtual void visit(const graph::operation::Concat::Node &) override;
+ virtual void visit(const graph::operation::Reshape::Node &) override;
+ virtual void visit(const graph::operation::FullyConnected::Node &) override;
+ virtual void visit(const graph::operation::Softmax::Node &) override;
+ virtual void visit(const graph::operation::NOP::Node &) override;
private:
- void mark(int32_t ind) { _tensor_builder.mark(::neurun::graph::operand::Index{ind}); }
+ void mark(const graph::operand::Index &ind) { _tensor_builder.mark(ind); }
private:
neurun::backend::ITensorBuilder &_tensor_builder;
auto linear = plan.model().linearize();
// Dump ops
- linear->accept(neurun::codegen::Dumper{});
+ // TODO Update Dumper with neurun::graph::operation::Node version
+ // linear->accept(neurun::codegen::Dumper{});
::internal::BackendManager backend_manager{plan};
neurun::codegen::BackendResolver backend_resolver{backend_manager};
auto &graph = model->deref();
+ auto node_param =
+ neurun::graph::operation::Node::InitParam{inputCount, inputs, outputCount, outputs};
+
switch (type)
{
case ANEURALNETWORKS_CONV_2D:
if (inputCount == 7)
{
- using internal::tflite::op::Conv2D::implicit::Param;
- using internal::tflite::op::Conv2D::implicit::Node;
using GraphNode = neurun::graph::operation::Conv2D::Implicit::Node;
- graph.addOperation(nnfw::make_unique<GraphNode>(
- nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
}
else
{
if (inputCount == 7)
{
- using internal::tflite::op::MaxPool2D::implicit::Param;
- using internal::tflite::op::MaxPool2D::implicit::Node;
using GraphNode = neurun::graph::operation::MaxPool2D::Implicit::Node;
- graph.addOperation(nnfw::make_unique<GraphNode>(
- nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
}
else
{
if (inputCount == 7)
{
- using internal::tflite::op::AvgPool2D::implicit::Param;
- using internal::tflite::op::AvgPool2D::implicit::Node;
using GraphNode = neurun::graph::operation::AvgPool2D::Implicit::Node;
- graph.addOperation(nnfw::make_unique<GraphNode>(
- nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
}
else
{
}
case ANEURALNETWORKS_CONCATENATION:
{
- using internal::tflite::op::Concat::Param;
- using internal::tflite::op::Concat::Node;
using GraphNode = neurun::graph::operation::Concat::Node;
- graph.addOperation(nnfw::make_unique<GraphNode>(
- nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
break;
}
case ANEURALNETWORKS_RESHAPE:
{
- using internal::tflite::op::Reshape::Param;
- using internal::tflite::op::Reshape::Node;
using GraphNode = neurun::graph::operation::Reshape::Node;
- graph.addOperation(nnfw::make_unique<GraphNode>(
- nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
break;
}
case ANEURALNETWORKS_FULLY_CONNECTED:
{
- using internal::tflite::op::FullyConnected::Param;
- using internal::tflite::op::FullyConnected::Node;
using GraphNode = neurun::graph::operation::FullyConnected::Node;
- graph.addOperation(nnfw::make_unique<GraphNode>(
- nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
break;
}
case ANEURALNETWORKS_SOFTMAX:
{
- using internal::tflite::op::Softmax::Param;
- using internal::tflite::op::Softmax::Node;
using GraphNode = neurun::graph::operation::Softmax::Node;
- graph.addOperation(nnfw::make_unique<GraphNode>(
- nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
+ graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
break;
}
}
}
+IndexSet::IndexSet(std::initializer_list<uint32_t> list)
+{
+ for (auto val : list)
+ {
+ _set.emplace_back(val);
+ }
+}
+
bool IndexSet::contains(const Index &index) const
{
return std::find(_set.begin(), _set.end(), index) != _set.end();
IndexSet(void) = default;
IndexSet(std::initializer_list<Index> list);
IndexSet(std::initializer_list<int32_t> list);
+ IndexSet(std::initializer_list<uint32_t> list);
public:
void append(const Index &index) { _set.emplace_back(index); }
uint32_t size() const { return static_cast<uint32_t>(_set.size()); }
const std::vector<Index> &list() const { return _set; }
const Index &at(IO::Index set_index) const { return _set.at(set_index.asInt()); }
+ const Index &at(uint32_t index) const { return _set.at(index); }
bool contains(const Index &index) const;
private:
void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 7);
+ assert(init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> IFM Tensor Index
+ // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 2 -> Horizontal (over width) Stride Index
+ // 3 -> Vertical (over height) Stride Index
+ // 4 -> Filter Width Index
+ // 5 -> Filter Height Index
+ // 6 -> FuseCode (activation) Index
+
+ setInputs({init_param.inputs[0]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.padding_index = init_param.inputs[1];
+ _param.hstride_index = init_param.inputs[2];
+ _param.vstride_index = init_param.inputs[3];
+
+ _param.kw_index = init_param.inputs[4];
+ _param.kh_index = init_param.inputs[5];
+ _param.activation_index = init_param.inputs[6];
+}
+
void Node::setInputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().ifm_index = indexes.at(index).asInt();
+ graph::operation::Node::setInputs(indexes);
}
void Node::setOutputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().ofm_index = indexes.at(index).asInt();
+ graph::operation::Node::setOutputs(indexes);
}
} // namespace Implicit
#include <memory>
#include "graph/operation/Node.h"
-#include "internal/op/AvgPool2D.h"
namespace neurun
{
namespace Implicit
{
+struct Param
+{
+ int32_t kw_index;
+ int32_t kh_index;
+
+ int32_t hstride_index;
+ int32_t vstride_index;
+
+ int32_t padding_index;
+ int32_t activation_index;
+};
+
class Node : public graph::operation::Node
{
public:
- Node(std::unique_ptr<::internal::tflite::op::AvgPool2D::implicit::Node> &&op) : _op{std::move(op)}
- {
- }
+ Node(const graph::operation::Node::InitParam &init_param);
public:
virtual void accept(NodeVisitor &&) const override;
public:
- virtual operand::IndexSet getInputs() const override { return {_op->param().ifm_index}; }
- virtual operand::IndexSet getOutputs() const override { return {_op->param().ofm_index}; }
virtual void setInputs(const operand::IndexSet &indexes) override;
virtual void setOutputs(const operand::IndexSet &indexes) override;
- virtual const ::internal::tflite::op::Node *op() const override { return _op.get(); }
+
+public:
+ const Param &param() const { return _param; }
private:
- std::unique_ptr<::internal::tflite::op::AvgPool2D::implicit::Node> _op;
+ Param _param;
};
} // namespace Implicit
void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
-operand::IndexSet Node::getInputs() const
+Node::Node(const graph::operation::Node::InitParam &init_param)
{
- operand::IndexSet set;
- for (auto index : _op->param().ifm_indexes)
- {
- operand::Index ind{index};
- set.append({ind});
- }
- return set;
-}
+ assert(init_param.input_count > 2); // At least one input tensor and axis
+ assert(init_param.output_count == 1);
+
+ // When there are N + 1 inputs, each input should be interpreted as follows:
+ //
+ // [0, N) -> Input tensors
+ // N -> Axis
+ //
-void Node::setInputs(const operand::IndexSet &indexes)
-{
- std::vector<int32_t> inds;
- for (auto index : indexes.list())
{
- inds.emplace_back(index.asInt());
+ operand::IndexSet inds;
+ for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
+ {
+ inds.append(operand::Index{init_param.inputs[n]});
+ }
+ setInputs(inds);
}
- _op->param().ifm_indexes = inds;
+ setOutputs({init_param.outputs[0]});
+
+ _param.axis_index = init_param.inputs[init_param.input_count - 1];
}
void Node::setOutputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().ofm_index = indexes.at(index).asInt();
+ graph::operation::Node::setOutputs(indexes);
}
} // namespace Concat
namespace Concat
{
+struct Param
+{
+ int32_t axis_index;
+};
+
class Node : public graph::operation::Node
{
public:
- Node(std::unique_ptr<::internal::tflite::op::Concat::Node> &&op) : _op{std::move(op)} {}
+ Node(const graph::operation::Node::InitParam &init_param);
public:
virtual void accept(NodeVisitor &&) const override;
public:
- virtual operand::IndexSet getInputs() const override;
- virtual operand::IndexSet getOutputs() const override { return {_op->param().ofm_index}; }
- virtual void setInputs(const operand::IndexSet &indexes) override;
virtual void setOutputs(const operand::IndexSet &indexes) override;
- virtual const ::internal::tflite::op::Node *op() const override { return _op.get(); }
+
+public:
+ const Param &param() const { return _param; }
private:
- std::unique_ptr<::internal::tflite::op::Concat::Node> _op;
+ Param _param;
};
} // namespace Concat
void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 7 && init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ //
+ // 0 -> IFM Tensor Index
+ // 1 -> Kernel Tensor Index
+ // 2 -> Bias Tensor Index
+ // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 4 -> Stride (width) Index
+ // 5 -> Stride (height) Index
+ // 6 -> Activation Index
+
+ setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.padding_index = init_param.inputs[3];
+ _param.hstride_index = init_param.inputs[4];
+ _param.vstride_index = init_param.inputs[5];
+ _param.activation_index = init_param.inputs[6];
+}
+
void Node::setInputs(const operand::IndexSet &indexes)
{
- assert(indexes.size() == 1);
+ assert(indexes.size() == 3);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().ifm_index = indexes.at(index).asInt();
+ graph::operation::Node::setInputs(indexes);
}
void Node::setOutputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().ofm_index = indexes.at(index).asInt();
+ graph::operation::Node::setOutputs(indexes);
}
} // namespace Implicit
namespace Implicit
{
+struct Param
+{
+ int32_t hstride_index;
+ int32_t vstride_index;
+
+ int32_t padding_index;
+ int32_t activation_index;
+};
+
class Node : public graph::operation::Node
{
public:
- Node(std::unique_ptr<::internal::tflite::op::Conv2D::implicit::Node> &&op) : _op{std::move(op)} {}
+ Node(const graph::operation::Node::InitParam &);
public:
virtual void accept(NodeVisitor &&) const override;
public:
- virtual operand::IndexSet getInputs() const override
- {
- return {_op->param().ifm_index, _op->param().ker_index, _op->param().bias_index};
- }
- virtual operand::IndexSet getOutputs() const override { return {_op->param().ofm_index}; }
virtual void setInputs(const operand::IndexSet &indexes) override;
virtual void setOutputs(const operand::IndexSet &indexes) override;
- virtual const ::internal::tflite::op::Node *op() const override { return _op.get(); }
+
+public:
+ const Param &param() const { return _param; }
private:
- std::unique_ptr<::internal::tflite::op::Conv2D::implicit::Node> _op;
+ Param _param;
};
} // namespace Implicit
void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 4 && init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> A tensor, specifying the input.
+ // 1 -> A 2-D tensor, specifying the weights
+ // 2 -> A 1-D tensor, specifying the bias
+ // 3 -> An INT32 value, and has to be one of the FuseCode values
+
+ setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.activation_index = init_param.inputs[3];
+}
+
void Node::setInputs(const operand::IndexSet &indexes)
{
- assert(indexes.size() == 1);
+ assert(indexes.size() == 3);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().input_index = indexes.at(index).asInt();
+ graph::operation::Node::setInputs(indexes);
}
void Node::setOutputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().output_index = indexes.at(index).asInt();
+ graph::operation::Node::setOutputs(indexes);
}
} // namespace FullyConnected
namespace FullyConnected
{
+struct Param
+{
+ int32_t activation_index;
+};
+
class Node : public graph::operation::Node
{
public:
- Node(std::unique_ptr<::internal::tflite::op::FullyConnected::Node> &&op) : _op{std::move(op)} {}
+ Node(const graph::operation::Node::InitParam &init_param);
public:
virtual void accept(NodeVisitor &&) const override;
public:
- virtual operand::IndexSet getInputs() const override
- {
- return {_op->param().input_index, _op->param().weight_index, _op->param().bias_index};
- }
- virtual operand::IndexSet getOutputs() const override { return {_op->param().output_index}; }
virtual void setInputs(const operand::IndexSet &indexes) override;
virtual void setOutputs(const operand::IndexSet &indexes) override;
- virtual const ::internal::tflite::op::Node *op() const override { return _op.get(); }
+
+public:
+ const Param &param() const { return _param; }
private:
- std::unique_ptr<::internal::tflite::op::FullyConnected::Node> _op;
+ Param _param;
};
} // namespace FullyConnected
void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 7);
+ assert(init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> IFM Tensor Index
+ // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 2 -> Horizontal (over width) Stride Index
+ // 3 -> Vertical (over height) Stride Index
+ // 4 -> Filter Width Index
+ // 5 -> Filter Height Index
+ // 6 -> FuseCode (activation) Index
+
+ setInputs({init_param.inputs[0]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.padding_index = init_param.inputs[1];
+ _param.hstride_index = init_param.inputs[2];
+ _param.vstride_index = init_param.inputs[3];
+
+ _param.kw_index = init_param.inputs[4];
+ _param.kh_index = init_param.inputs[5];
+ _param.activation_index = init_param.inputs[6];
+}
+
void Node::setInputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().ifm_index = indexes.at(index).asInt();
+ graph::operation::Node::setInputs(indexes);
}
void Node::setOutputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().ofm_index = indexes.at(index).asInt();
+ graph::operation::Node::setOutputs(indexes);
}
} // namespace Implicit
namespace Implicit
{
+struct Param
+{
+ int32_t kw_index;
+ int32_t kh_index;
+
+ int32_t hstride_index;
+ int32_t vstride_index;
+
+ int32_t padding_index;
+ int32_t activation_index;
+};
+
class Node : public graph::operation::Node
{
public:
- Node(std::unique_ptr<::internal::tflite::op::MaxPool2D::implicit::Node> &&op) : _op{std::move(op)}
- {
- }
+ virtual void accept(NodeVisitor &&) const override;
public:
- virtual void accept(NodeVisitor &&) const override;
+ Node(const graph::operation::Node::InitParam &init_param);
public:
- virtual operand::IndexSet getInputs() const override { return {_op->param().ifm_index}; }
- virtual operand::IndexSet getOutputs() const override { return {_op->param().ofm_index}; }
virtual void setInputs(const operand::IndexSet &indexes) override;
virtual void setOutputs(const operand::IndexSet &indexes) override;
- virtual const ::internal::tflite::op::Node *op() const override { return _op.get(); }
+
+public:
+ const Param &param() const { return _param; }
private:
- std::unique_ptr<::internal::tflite::op::MaxPool2D::implicit::Node> _op;
+ Param _param;
};
} // namespace Implicit
void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
-operand::IndexSet Node::getInputs() const
-{
- operand::IndexSet set;
- for (auto index : _op->param().ifm_indexes)
- {
- operand::Index ind{index};
- set.append({ind});
- }
- return set;
-}
-
-operand::IndexSet Node::getOutputs() const
-{
- operand::IndexSet set;
- for (auto index : _op->param().ofm_indexes)
- {
- operand::Index ind{index};
- set.append({ind});
- }
- return set;
-}
-
-void Node::setInputs(const operand::IndexSet &indexes)
-{
- std::vector<int32_t> inds;
- for (auto index : indexes.list())
- {
- inds.emplace_back(index.asInt());
- }
- _op->param().ifm_indexes = inds;
-}
-
-void Node::setOutputs(const operand::IndexSet &indexes)
-{
- std::vector<int32_t> inds;
- for (auto index : indexes.list())
- {
- inds.emplace_back(index.asInt());
- }
- _op->param().ofm_indexes = inds;
-}
-
} // namespace NOP
} // namespace operation
} // namespace graph
class Node : public graph::operation::Node
{
public:
- Node(std::unique_ptr<::internal::tflite::op::NOP::Node> &&op) : _op{std::move(op)} {}
+ Node(const graph::operation::Node::InitParam &) {}
public:
virtual void accept(NodeVisitor &&) const override;
-
-public:
- virtual operand::IndexSet getInputs() const override;
- virtual operand::IndexSet getOutputs() const override;
- virtual void setInputs(const operand::IndexSet &indexes) override;
- virtual void setOutputs(const operand::IndexSet &indexes) override;
- virtual const ::internal::tflite::op::Node *op() const override { return _op.get(); }
-
-private:
- std::unique_ptr<::internal::tflite::op::NOP::Node> _op;
};
} // namespace NOP
class Node
{
public:
- virtual ~Node() = default;
+ struct InitParam
+ {
+ uint32_t input_count;
+ const uint32_t *inputs;
+ uint32_t output_count;
+ const uint32_t *outputs;
+ };
public:
virtual void accept(NodeVisitor &&) const = 0;
public:
- virtual operand::IndexSet getInputs() const = 0;
- virtual operand::IndexSet getOutputs() const = 0;
+ virtual const operand::IndexSet &getInputs() const { return _inputs; }
+ virtual const operand::IndexSet &getOutputs() const { return _outputs; }
// It's for only input/output tensors but const data.
- virtual void setInputs(const operand::IndexSet &indexes) = 0;
- virtual void setOutputs(const operand::IndexSet &indexes) = 0;
- virtual const ::internal::tflite::op::Node *op() const = 0;
+ virtual void setInputs(const operand::IndexSet &indexes) { _inputs = indexes; }
+ virtual void setOutputs(const operand::IndexSet &indexes) { _outputs = indexes; }
public:
void lower_info(std::unique_ptr<LowerInfo> &&lower_info) { _lower_info = std::move(lower_info); }
const LowerInfo *lower_info() const { return _lower_info.get(); }
private:
+ operand::IndexSet _inputs;
+ operand::IndexSet _outputs;
std::unique_ptr<LowerInfo> _lower_info;
};
void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> A tensor, specifying the tensor to be reshaped.
+ // 1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output
+ // tensor
+
+ setInputs({init_param.inputs[0]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.shape_index = init_param.inputs[1];
+}
+
void Node::setInputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().input_index = indexes.at(index).asInt();
+ graph::operation::Node::setInputs(indexes);
}
void Node::setOutputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().output_index = indexes.at(index).asInt();
+ graph::operation::Node::setOutputs(indexes);
}
} // namespace Reshape
namespace Reshape
{
+struct Param
+{
+ int32_t shape_index;
+};
+
class Node : public graph::operation::Node
{
public:
- Node(std::unique_ptr<::internal::tflite::op::Reshape::Node> &&op) : _op{std::move(op)} {}
+ virtual void accept(NodeVisitor &&) const override;
public:
- virtual void accept(NodeVisitor &&) const override;
+ Node(const graph::operation::Node::InitParam &init_param);
public:
- virtual operand::IndexSet getInputs() const override
- {
- return {_op->param().input_index, _op->param().shape_index};
- }
- virtual operand::IndexSet getOutputs() const override { return {_op->param().output_index}; }
virtual void setInputs(const operand::IndexSet &indexes) override;
virtual void setOutputs(const operand::IndexSet &indexes) override;
- virtual const ::internal::tflite::op::Node *op() const override { return _op.get(); }
+
+public:
+ const Param &param() const { return _param; }
private:
- std::unique_ptr<::internal::tflite::op::Reshape::Node> _op;
+ Param _param;
};
} // namespace Reshape
void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+Node::Node(const graph::operation::Node::InitParam &init_param)
+{
+ assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> A 2-D or 4-D tensor, specifying the tensor to be reshaped.
+ // 1 -> FLOAT32 value, specifying the positive scaling factor for the exponent, beta.
+
+ setInputs({init_param.inputs[0]});
+ setOutputs({init_param.outputs[0]});
+
+ _param.scale_index = init_param.inputs[1];
+}
+
void Node::setInputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().input_index = indexes.at(index).asInt();
+ graph::operation::Node::setInputs(indexes);
}
void Node::setOutputs(const operand::IndexSet &indexes)
{
assert(indexes.size() == 1);
- ::neurun::graph::operand::IO::Index index{0};
- _op->param().output_index = indexes.at(index).asInt();
+ graph::operation::Node::setOutputs(indexes);
}
} // namespace Softmax
namespace Softmax
{
+struct Param
+{
+ int32_t scale_index;
+};
+
class Node : public graph::operation::Node
{
public:
- Node(std::unique_ptr<::internal::tflite::op::Softmax::Node> &&op) : _op{std::move(op)} {}
+ virtual void accept(NodeVisitor &&) const override;
public:
- virtual void accept(NodeVisitor &&) const override;
+ Node(const graph::operation::Node::InitParam &init_param);
public:
- virtual operand::IndexSet getInputs() const override { return {_op->param().input_index}; }
- virtual operand::IndexSet getOutputs() const override { return {_op->param().output_index}; }
virtual void setInputs(const operand::IndexSet &indexes) override;
virtual void setOutputs(const operand::IndexSet &indexes) override;
- virtual const ::internal::tflite::op::Node *op() const override { return _op.get(); }
+
+public:
+ const Param &param() const { return _param; }
private:
- std::unique_ptr<::internal::tflite::op::Softmax::Node> _op;
+ Param _param;
};
} // namespace Softmax
// NOTE The relation between "Internal Name" and "NN API Name" is "1 : N".
// Internal Name | NN API Name
-OP(Conv2D::implicit , CONV_2D)
-OP(AvgPool2D::implicit , AVERAGE_POOL_2D)
-OP(MaxPool2D::implicit , MAX_POOL_2D)
+OP(Conv2D::Implicit , CONV_2D)
+OP(AvgPool2D::Implicit , AVERAGE_POOL_2D)
+OP(MaxPool2D::Implicit , MAX_POOL_2D)
OP(Concat , CONCATENATION)
OP(FullyConnected , FULLY_CONNECTED)
OP(Reshape , RESHAPE)
// 2. Append the node to vector when DFS for the node finishes(post order)
// 3. Reverse the order of nodes
- graph::Graph::PostDfsConstIterator().iterate(graph,
- [&](const neurun::graph::operation::Node &node) {
- auto op = node.op();
- _operations.emplace_back(op);
- });
+ graph::Graph::PostDfsConstIterator().iterate(
+ graph, [&](const neurun::graph::operation::Node &node) { _operations.emplace_back(&node); });
std::reverse(std::begin(_operations), std::end(_operations));
}
-void Linear::accept(::internal::tflite::op::NodeVisitor &&visitor) const
+void Linear::accept(graph::operation::NodeVisitor &&visitor) const
{
for (const auto op : _operations)
{
#include <vector>
-#include "internal/op/Node.h"
+#include "graph/operation/Node.h"
-namespace internal
+namespace neurun
{
-namespace tflite
+namespace graph
{
-namespace op
+namespace operation
{
struct NodeVisitor;
-} // namespace op
-} // namespace tflite
-} // namespace internal
+} // namespace operation
+} // namespace graph
+} // namespace neurun
namespace neurun
{
namespace linear
{
-class Linear : ::internal::tflite::op::Node
+class Linear
{
public:
Linear(const graph::Graph &graph);
Linear(const Linear &linear) = delete;
public:
- virtual void accept(::internal::tflite::op::NodeVisitor &&) const;
+ void accept(graph::operation::NodeVisitor &&visitor) const;
// TODO Remove this since tensor marking will be replaced with another way
virtual void markTensors(neurun::codegen::BackendResolver &) const;
public:
private:
- std::vector<const ::internal::tflite::op::Node *> _operations;
+ std::vector<const graph::operation::Node *> _operations;
};
} // namespace linear
class MockNode : public neurun::graph::operation::Node
{
public:
- MockNode(Index input, Index output) : _input{input}, _output{output}
+ MockNode(Index input, Index output)
{
- // DO NOTHING
+ setInputs({input});
+ setOutputs({output});
}
public:
virtual void accept(neurun::graph::operation::NodeVisitor &&) const override {}
-
-public:
- virtual IndexSet getInputs() const override { return {_input}; }
- virtual IndexSet getOutputs() const override { return {_output}; }
- virtual void setInputs(const IndexSet &indexes) override { _input = indexes.at(IOIndex{0}); }
- virtual void setOutputs(const IndexSet &indexes) override { _output = indexes.at(IOIndex{0}); }
- virtual const ::internal::tflite::op::Node *op() const override { return nullptr; }
-
-private:
- Index _input;
- Index _output;
};
class MultiInputMockNode : public neurun::graph::operation::Node
{
public:
- MultiInputMockNode(IndexSet inputs, Index output) : _output{output}
+ MultiInputMockNode(IndexSet inputs, Index output)
{
- for (auto index : inputs.list())
- {
- _inputs.emplace_back(index);
- }
+ setInputs(inputs);
+ setOutputs({output});
}
public:
virtual void accept(neurun::graph::operation::NodeVisitor &&) const override {}
-
-public:
- virtual IndexSet getInputs() const override
- {
- IndexSet set;
- for (auto index : _inputs)
- {
- set.append({index});
- }
- return set;
- }
-
- virtual IndexSet getOutputs() const override { return {_output}; }
- virtual void setInputs(const IndexSet &indexes) override
- {
- std::vector<Index> inputs;
- for (auto index : indexes.list())
- {
- inputs.emplace_back(index);
- }
- _inputs = inputs;
- }
- virtual void setOutputs(const IndexSet &indexes) override { _output = indexes.at(IOIndex{0}); }
- virtual const ::internal::tflite::op::Node *op() const override { return nullptr; }
-
-private:
- std::vector<Index> _inputs;
- Index _output;
};
} // namespace anonymous
class TestNode : public Node
{
public:
- TestNode() = default;
+ TestNode(const Node::InitParam &)
+ {
+ setInputs({1, 2, 3, 4});
+ setOutputs({5, 6, 7});
+ }
public:
virtual void accept(neurun::graph::operation::NodeVisitor &&) const override {}
-
-public:
- virtual neurun::graph::operand::IndexSet getInputs() const { return {1, 2, 3, 4}; }
- virtual neurun::graph::operand::IndexSet getOutputs() const { return {1, 2, 3}; }
- virtual void setInputs(const neurun::graph::operand::IndexSet &indexes) override {}
- virtual void setOutputs(const neurun::graph::operand::IndexSet &indexes) override {}
- virtual const ::internal::tflite::op::Node *op() const { return nullptr; }
};
TEST(graph_operation_Set, operation_test)
{
Set set;
- set.append(std::unique_ptr<Node>(new TestNode()));
+ set.append(std::unique_ptr<Node>(new TestNode(Node::InitParam{0, nullptr, 0, nullptr})));
Index idx{0u};
ASSERT_EQ(set.at(idx).getInputs().size(), 4);
ASSERT_EQ(set.at(idx).getOutputs().size(), 3);
using Index = neurun::graph::operand::IO::Index;
using IndexSet = neurun::graph::operand::IndexSet;
+using GraphNodeInitParam = neurun::graph::operation::Node::InitParam;
TEST(graph_operation_setIO, operation_setIO_conv)
{
}
uint32_t outoperand = graph.addOperand(shape, type).asInt();
- using Param = internal::tflite::op::Conv2D::implicit::Param;
- using Node = internal::tflite::op::Conv2D::implicit::Node;
using GraphNode = neurun::graph::operation::Conv2D::Implicit::Node;
- auto conv = nnfw::make_unique<GraphNode>(
- nnfw::make_unique<Node>(Param(7, params.data(), 1, &outoperand)));
-
+ auto conv = nnfw::make_unique<GraphNode>(GraphNodeInitParam{7, params.data(), 1, &outoperand});
ASSERT_EQ(conv->getInputs().at(Index{0}).asInt(), params[0]);
- conv->setInputs({8});
+ conv->setInputs({8, 9, 10});
ASSERT_NE(conv->getInputs().at(Index{0}).asInt(), params[0]);
ASSERT_EQ(conv->getInputs().at(Index{0}).asInt(), 8);
}
}
uint32_t outoperand = graph.addOperand(shape, type).asInt();
- using Param = internal::tflite::op::Concat::Param;
- using Node = internal::tflite::op::Concat::Node;
using GraphNode = neurun::graph::operation::Concat::Node;
- auto concat = nnfw::make_unique<GraphNode>(
- nnfw::make_unique<Node>(Param(7, params.data(), 1, &outoperand)));
+ auto concat = nnfw::make_unique<GraphNode>(GraphNodeInitParam{7, params.data(), 1, &outoperand});
ASSERT_EQ(concat->getInputs().size(), 6);
ASSERT_EQ(concat->getInputs().at(Index{0}).asInt(), params[0]);
class MockNode : public neurun::graph::operation::Node
{
public:
- MockNode(neurun::graph::operand::Index input, neurun::graph::operand::Index output)
- : _input{input}, _output{output}
+ MockNode(const neurun::graph::operand::Index &input, const neurun::graph::operand::Index &output)
{
- // DO NOTHING
+ setInputs({input});
+ setOutputs({output});
}
public:
virtual void accept(neurun::graph::operation::NodeVisitor &&) const override {}
-
-public:
- virtual neurun::graph::operand::IndexSet getInputs() const override { return {_input}; }
- virtual neurun::graph::operand::IndexSet getOutputs() const override { return {_output}; }
- virtual void setInputs(const neurun::graph::operand::IndexSet &indexes) override {}
- virtual void setOutputs(const neurun::graph::operand::IndexSet &indexes) override {}
- virtual const ::internal::tflite::op::Node *op() const override { return nullptr; }
-
-private:
- neurun::graph::operand::Index _input;
- neurun::graph::operand::Index _output;
};
TEST(Verifier, dag_checker)