namespace acl_cl
{
-InitializerGenerator::InitializerGenerator(const ::internal::tflite::operand::Set &ctx) : _ctx(ctx)
+InitializerGenerator::InitializerGenerator(const neurun::graph::operand::Set &ctx) : _ctx(ctx)
{
// DO NOTHING
}
#include "internal/IInitializerGenerator.h"
-#include "internal/Model.h"
+#include "graph/operand/Set.h"
namespace neurun
{
class InitializerGenerator : public ::internal::IInitializerGenerator
{
public:
- InitializerGenerator(const ::internal::tflite::operand::Set &ctx);
+ InitializerGenerator(const neurun::graph::operand::Set &ctx);
Initializer generateWeight(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
Initializer generateWeight(const ::internal::tflite::op::FullyConnected::Node &node) override;
Initializer generateBias(const ::internal::tflite::op::FullyConnected::Node &node) override;
private:
- const ::internal::tflite::operand::Set &_ctx;
+ const neurun::graph::operand::Set &_ctx;
};
} // namespace acl_cl
// StageGenerator
//
StageGenerator::StageGenerator(
- const ::internal::tflite::operand::Set &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
+ const neurun::graph::operand::Set &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder)
: _ctx(ctx), _tensor_builder(tensor_builder), _common_tensor_builder(common_tensor_builder)
{
#include "internal/IStageGenerator.h"
-#include "internal/Model.h"
+#include "graph/operand/Set.h"
#include "backend/acl_cl/TensorBuilder.h"
#include "internal/common/TensorBuilder.h"
class StageGenerator : public ::internal::IStageGenerator
{
public:
- StageGenerator(const ::internal::tflite::operand::Set &ctx,
+ StageGenerator(const neurun::graph::operand::Set &ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder);
generate(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override;
private:
- const ::internal::tflite::operand::Set &_ctx;
+ const neurun::graph::operand::Set &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
};
namespace cpu
{
-InitializerGenerator::InitializerGenerator(const ::internal::tflite::operand::Set &ctx) : _ctx(ctx)
+InitializerGenerator::InitializerGenerator(const neurun::graph::operand::Set &ctx) : _ctx(ctx)
{
// DO NOTHING
}
#include "internal/IInitializerGenerator.h"
-#include "internal/Model.h"
+#include "graph/operand/Set.h"
namespace neurun
{
class InitializerGenerator : public ::internal::IInitializerGenerator
{
public:
- InitializerGenerator(const ::internal::tflite::operand::Set &ctx);
+ InitializerGenerator(const neurun::graph::operand::Set &ctx);
Initializer generateWeight(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
Initializer generateWeight(const ::internal::tflite::op::FullyConnected::Node &node) override;
Initializer generateBias(const ::internal::tflite::op::FullyConnected::Node &node) override;
private:
- const ::internal::tflite::operand::Set &_ctx;
+ const neurun::graph::operand::Set &_ctx;
};
} // namespace cpu
{
StageGenerator::StageGenerator(
- const ::internal::tflite::operand::Set &operand_ctx,
+ const neurun::graph::operand::Set &operand_ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder)
: _ctx(operand_ctx), _tensor_builder(tensor_builder),
#include "internal/IStageGenerator.h"
-#include "internal/Model.h"
+#include "graph/operand/Set.h"
#include "internal/cpu.h"
#include "TensorBuilder.h"
class StageGenerator : public ::internal::IStageGenerator
{
public:
- StageGenerator(const ::internal::tflite::operand::Set &ctx,
+ StageGenerator(const neurun::graph::operand::Set &ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder);
generate(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override;
private:
- const ::internal::tflite::operand::Set &_ctx;
+ const neurun::graph::operand::Set &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
};
#include <NeuralNetworks.h>
+#include <algorithm>
#include <typeindex>
#include <arm_compute/core/CL/ICLTensor.h>
class Planner : public ::internal::tflite::op::NodeVisitor
{
public:
- Planner(const ::internal::tflite::operand::Set &ctx, IPlanBuilder &builder,
+ Planner(const neurun::graph::operand::Set &ctx, IPlanBuilder &builder,
BackendResolver &backend_resolver)
: _ctx{ctx}, _builder{builder}, _backend_resolver(backend_resolver)
{
void visit(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override;
private:
- const ::internal::tflite::operand::Set &_ctx;
+ const neurun::graph::operand::Set &_ctx;
IPlanBuilder &_builder;
BackendResolver &_backend_resolver;
};
auto tensor_builders = backend_resolver.getAllTensorBuilders();
auto common_tensor_builder = backend_resolver.getCommonTensorBuilder();
- // Mark tensors
- const auto &operations = _plan.model().operations();
-
// Prepare tensors
for (auto &tensor_builder : tensor_builders)
{
auto &plan = this->plan();
const auto &operands = plan.model().operands();
- auto &operations = plan.model().operations();
+
+ // Get linearized ops
+ std::vector<const ::internal::tflite::op::Node *> operations;
+ {
+ plan.model().iteratePostDfs([&](const neurun::graph::operation::Node &node) {
+ auto op = node.op();
+ operations.emplace_back(op);
+ // dynamic_cast<const ::internal::tflite::op::Conv2D::implicit::Node*>(op)
+ });
+
+ std::reverse(std::begin(operations), std::end(operations));
+ }
::internal::BackendManager backend_manager{plan};
BackendResolver backend_resolver{backend_manager};
for (uint32_t n = 0; n < operations.size(); ++n)
{
- const auto &op = operations.at(n);
+ const auto &op = *operations.at(n);
auto tensor_builder = backend_resolver.getTensorBuilder(typeid(op));
op.accept(TensorMarker{*tensor_builder});
}
+#if 0 // Tensor Conversion disabled
auto tensor_builders = backend_resolver.getAllTensorBuilders();
for (auto tensor_builder : tensor_builders)
{
tensor_builder->insertTensorConvertNodes(operations);
}
+#endif
for (uint32_t n = 0; n < operations.size(); ++n)
{
- operations.at(n).accept(Planner{operands, plan_builder, backend_resolver});
+ const auto &op = *operations.at(n);
+ op.accept(Planner{operands, plan_builder, backend_resolver});
}
// TODO Add optimization passes
#ifndef __COMPILATION_H__
#define __COMPILATION_H__
-#include "internal/Model.h"
#include "internal/Plan.h"
+#include "graph/Graph.h"
struct ANeuralNetworksCompilation
{
public:
- ANeuralNetworksCompilation(const std::shared_ptr<internal::tflite::Model> &model)
+ ANeuralNetworksCompilation(const std::shared_ptr<neurun::graph::Graph> &model)
: _plan{new internal::Plan{model}}
{
// DO NOTHING
public:
ANeuralNetworksExecution(const std::shared_ptr<const internal::Plan> &plan) : _plan{plan}
{
- _sources.resize(_plan->model().inputs.size());
- _sinks.resize(_plan->model().outputs.size());
+ _sources.resize(_plan->model().inputs().size());
+ _sinks.resize(_plan->model().outputs().size());
}
public:
return ANEURALNETWORKS_UNEXPECTED_NULL;
}
- std::shared_ptr<internal::tflite::Model> internal;
+ std::shared_ptr<neurun::graph::Graph> internal;
model->release(internal);
// NOTE The current implemenation assumes that every input is a feature map.
// TODO Remove this assumption
- const auto operand_index = execution->plan().model().inputs.at(index);
+ const auto operand_index = execution->plan().model().inputs().at(index);
if (operands.at(operand_index).shape().rank() == 2)
{
// NOTE The current implemenation assumes that every output is a feature map.
// TODO Remove this assumption
- const auto operand_index = execution->plan().model().outputs.at(index);
+ const auto operand_index = execution->plan().model().outputs().list().at(index);
if (operands.at(operand_index).shape().rank() == 2)
{
const auto &model = plan.model();
// Set input(s)
- for (uint32_t n = 0; n < model.inputs.size(); ++n)
+ for (uint32_t n = 0; n < model.inputs().size(); ++n)
{
auto setter = [&](::arm_compute::ITensor &tensor) { execution->source(n).push(tensor); };
- auto objects = plan.common_operands().at(model.inputs.at(n));
+ ::internal::tflite::operand::Index index{model.inputs().at(n).asInt()};
+ auto objects = plan.operands().at(index);
for (auto object : objects)
{
}
// Get output(s)
- for (uint32_t n = 0; n < model.outputs.size(); ++n)
+ for (uint32_t n = 0; n < model.outputs().size(); ++n)
{
auto getter = [&](::arm_compute::ITensor &tensor) { execution->sink(n).pull(tensor); };
- auto objects = plan.common_operands().at(model.outputs.at(n));
+ ::internal::tflite::operand::Index index{model.outputs().at(n).asInt()};
+ auto objects = plan.operands().at(index);
for (auto object : objects)
{
#include <stdexcept>
#include <new>
+#include "nnfw/std/memory.h"
+
+#include "graph/Graph.h"
#include "model.h"
#include "memory.h"
+#include "graph/operation/AvgPool2D.h"
+#include "graph/operation/Concat.h"
+#include "graph/operation/Conv2D.h"
+#include "graph/operation/FullyConnected.h"
+#include "graph/operation/MaxPool2D.h"
+#include "graph/operation/Reshape.h"
+#include "graph/operation/Softmax.h"
int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
{
shape.set(type->type, type->scale, type->zeroPoint);
- model->deref().operands().append(shape);
+ model->deref().addOperand(shape);
// NOTE We do NOT allocate CLTensor here as we do not how to interpret this one.
// TensorFlow Lite may interpret a rank-4 tensor either as a feature map (with batch) or
return ANEURALNETWORKS_BAD_STATE;
}
- const internal::tflite::operand::Index ind{index};
- auto &obj = model->deref().operands().at(ind);
-
- using internal::tflite::operand::CachedData;
+ const neurun::graph::operand::Index ind(index);
+ auto &obj = model->deref().operands().at(ind);
if (!obj.setAsConstant())
{
return ANEURALNETWORKS_BAD_DATA;
}
- obj.data<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length);
+
+ using internal::tflite::operand::CachedData;
+
+ model->deref().setOperandValue(
+ ind, nnfw::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length));
return ANEURALNETWORKS_NO_ERROR;
}
return ANEURALNETWORKS_BAD_STATE;
}
- const internal::tflite::operand::Index ind{index};
- auto &obj = model->deref().operands().at(ind);
-
- using internal::tflite::operand::ExternalData;
+ const neurun::graph::operand::Index ind(index);
+ auto &obj = model->deref().operands().at(ind);
if (!obj.setAsConstant())
{
return ANEURALNETWORKS_BAD_DATA;
}
- obj.data<ExternalData>(reinterpret_cast<const uint8_t *>(memory->base() + offset), length);
+
+ using internal::tflite::operand::ExternalData;
+
+ model->deref().setOperandValue(
+ ind, nnfw::make_unique<ExternalData>(
+ reinterpret_cast<const uint8_t *>(memory->base() + offset), length));
return ANEURALNETWORKS_NO_ERROR;
}
}
}
+ auto &graph = model->deref();
+
switch (type)
{
case ANEURALNETWORKS_CONV_2D:
{
using internal::tflite::op::Conv2D::implicit::Param;
using internal::tflite::op::Conv2D::implicit::Node;
+ using GraphNode = neurun::graph::operation::Conv2D::Implicit::Node;
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ graph.addOperation(nnfw::make_unique<GraphNode>(
+ nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
}
else
{
{
using internal::tflite::op::MaxPool2D::implicit::Param;
using internal::tflite::op::MaxPool2D::implicit::Node;
+ using GraphNode = neurun::graph::operation::MaxPool2D::Implicit::Node;
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ graph.addOperation(nnfw::make_unique<GraphNode>(
+ nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
}
else
{
{
using internal::tflite::op::AvgPool2D::implicit::Param;
using internal::tflite::op::AvgPool2D::implicit::Node;
+ using GraphNode = neurun::graph::operation::AvgPool2D::Implicit::Node;
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ graph.addOperation(nnfw::make_unique<GraphNode>(
+ nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
}
else
{
{
using internal::tflite::op::Concat::Param;
using internal::tflite::op::Concat::Node;
+ using GraphNode = neurun::graph::operation::Concat::Node;
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ graph.addOperation(nnfw::make_unique<GraphNode>(
+ nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
break;
}
{
using internal::tflite::op::Reshape::Param;
using internal::tflite::op::Reshape::Node;
+ using GraphNode = neurun::graph::operation::Reshape::Node;
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ graph.addOperation(nnfw::make_unique<GraphNode>(
+ nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
break;
}
{
using internal::tflite::op::FullyConnected::Param;
using internal::tflite::op::FullyConnected::Node;
+ using GraphNode = neurun::graph::operation::FullyConnected::Node;
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ graph.addOperation(nnfw::make_unique<GraphNode>(
+ nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
break;
}
{
using internal::tflite::op::Softmax::Param;
using internal::tflite::op::Softmax::Node;
+ using GraphNode = neurun::graph::operation::Softmax::Node;
- // Add 'operations'
- auto &operations = model->deref().operations();
-
- operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+ graph.addOperation(nnfw::make_unique<GraphNode>(
+ nnfw::make_unique<Node>(Param{inputCount, inputs, outputCount, outputs})));
break;
}
// Below, static_cast<int>(...) is introduced to eliminate compiler warning.
for (uint32_t n = 0; n < inputCount; ++n)
{
- const ::internal::tflite::operand::Index ind{static_cast<int>(inputs[n])};
- model->deref().inputs.emplace_back(ind);
+ const neurun::graph::operand::Index ind{static_cast<uint32_t>(inputs[n])};
+ model->deref().addInput(ind);
auto &obj = model->deref().operands().at(ind);
if (!obj.setAsModelInput())
for (uint32_t n = 0; n < outputCount; ++n)
{
- const ::internal::tflite::operand::Index ind{static_cast<int>(outputs[n])};
- model->deref().outputs.emplace_back(ind);
+ const neurun::graph::operand::Index ind{static_cast<uint32_t>(outputs[n])};
+ model->deref().addOutput(ind);
auto &obj = model->deref().operands().at(ind);
// Model output cannot become model input
const operand::IndexSet &inputs() const { return _inputs; }
const operand::IndexSet &outputs() const { return _outputs; }
const operand::Set &operands() const { return _operands; }
+ operand::Set &operands() { return _operands; } // TODO Remove this non-const accessor
public:
// TODO Introduce Iterator class to support many kinds of interation
#include <arm_compute/core/TensorInfo.h>
#include "internal/Model.h"
+#include "graph/operand/Index.h"
namespace internal
{
#ifndef __INTERNAL_MODEL_H__
#define __INTERNAL_MODEL_H__
+#include "graph/operand/Index.h"
+
namespace internal
{
namespace tflite
// DO NOTHING
}
+ // NOTE Temporary casting operator for legacy code compatibility
+public:
+ operator neurun::graph::operand::Index() const { return neurun::graph::operand::Index{_value}; }
+
public:
int asInt(void) const { return _value; }
#ifndef __INTERNAL_PLAN_H__
#define __INTERNAL_PLAN_H__
-#include "internal/Model.h"
+#include "graph/Graph.h"
#include "internal/IObject.h"
#include <map>
class Plan
{
public:
- Plan(const std::shared_ptr<::internal::tflite::Model> &model) : _model(model)
+ Plan(const std::shared_ptr<neurun::graph::Graph> &model) : _model(model)
{
// DO NOTHING
}
public:
- ::internal::tflite::Model &model(void) { return *_model; }
- const ::internal::tflite::Model &model(void) const { return *_model; }
+ neurun::graph::Graph &model(void) { return *_model; }
+ const neurun::graph::Graph &model(void) const { return *_model; }
public:
operand::Context &operands(void) { return _operands; }
const op::Sequence &operations(void) const { return _ops; }
private:
- std::shared_ptr<::internal::tflite::Model> _model;
+ std::shared_ptr<neurun::graph::Graph> _model;
operand::Context _operands;
operand::Context _common_operands;
op::Sequence _ops;
public:
void pull(::arm_compute::ITensor &tensor) const override
{
+// NOTE Keep the common tensor conversion code until Graph IR supports tensor conversion
+#if 0
// Only for common tensor now
- assert(typeid(tensor) == typeid(::internal::common::Tensor));
+ assert(typeid(tensor) == typeid(::internal::common::Tensor));
+#endif
float *base = reinterpret_cast<float *>(_base);
public:
void pull(::arm_compute::ITensor &tensor) const override
{
+ // TODO: This is just a workaround; it needs to be refactored.
+ if (typeid(tensor) == typeid(::internal::cpu::Tensor))
+ {
+ const ::internal::nnapi::feature::Reader<float> from{_shape, tensor.buffer(), _size};
+ ::internal::nnapi::feature::View<float> into{_shape, _base, _size};
+
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(bat, ch, row, col);
+ into.at(bat, ch, row, col) = value;
+ };
+ }
+ else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
+ {
+ const ::internal::arm_compute::feature::View<float> from{&tensor};
+ ::internal::nnapi::feature::View<float> into{_shape, _base, _size};
+
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(bat, ch, row, col);
+ into.at(bat, ch, row, col) = value;
+ };
+ }
+
+// NOTE Keep the common tensor conversion code until Graph IR supports tensor conversion
+#if 0
// Only for common tensor now
assert(typeid(tensor) == typeid(::internal::common::Tensor));
const auto value = from.at(batch, ch, row, col);
into.at(batch, ch, row, col) = value;
};
+#endif
}
private:
public:
void push(::arm_compute::ITensor &tensor) const override
{
+// NOTE Keep the common tensor conversion code until Graph IR supports tensor conversion
+#if 0
// Only for common tensor now
assert(typeid(tensor) == typeid(::internal::common::Tensor));
+#endif
auto base = reinterpret_cast<const float *>(_base);
public:
void push(::arm_compute::ITensor &tensor) const override
{
+ // TODO: This is just a workaround; it needs to be refactored.
+ if (typeid(tensor) == typeid(::internal::cpu::Tensor))
+ {
+ const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
+ ::internal::nnapi::feature::View<float> into{_shape, tensor.buffer(), _size};
+
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(bat, ch, row, col);
+ into.at(bat, ch, row, col) = value;
+ };
+ }
+ else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
+ {
+ const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
+ ::internal::arm_compute::feature::View<float> into{&tensor};
+
+ ::nnfw::util::feature::iterate(_shape)
+ << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
+ const auto value = from.at(bat, ch, row, col);
+ into.at(bat, ch, row, col) = value;
+ };
+ }
+
+// NOTE Keep the common tensor conversion code until Graph IR supports tensor conversion
+#if 0
// Only for common tensor now
assert(typeid(tensor) == typeid(::internal::common::Tensor));
const auto value = from.at(batch, ch, row, col);
into.at(batch, ch, row, col) = value;
};
+#endif
}
private:
//
// ANeuralNetworksModel
//
-ANeuralNetworksModel::ANeuralNetworksModel() : _model{new internal::tflite::Model}, _finished(false)
+ANeuralNetworksModel::ANeuralNetworksModel() : _model{new neurun::graph::Graph}, _finished(false)
{
// DO NOTHING
}
#include <NeuralNetworks.h>
-#include "internal/Model.h"
+#include "graph/Graph.h"
struct ANeuralNetworksModel
{
ANeuralNetworksModel();
public:
- internal::tflite::Model &deref(void) { return *_model; }
+ neurun::graph::Graph &deref(void) { return *_model; }
ResultCode finish();
bool isFinished() { return _finished; }
public:
- void release(std::shared_ptr<internal::tflite::Model> &model) { model = _model; }
+ void release(std::shared_ptr<neurun::graph::Graph> &model) { model = _model; }
private:
- std::shared_ptr<internal::tflite::Model> _model;
+ std::shared_ptr<neurun::graph::Graph> _model;
bool _finished;
};