#include <arm_compute/core/TensorInfo.h>
#include "graph/operand/Index.h"
+#include "codegen/Plan.h"
namespace neurun
{
virtual ~ITensorBuilder(void) = default;
virtual void mark(const ::neurun::graph::operand::Index &ind) = 0;
// TODO Add an interface for adding subsumption info
- virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) = 0;
+ virtual void prepare(codegen::Plan &plan,
+ const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) = 0;
virtual void allocate(void) = 0;
};
namespace acl_cl
{
// acl_cl TensorBuilder: the constructor drops its Plan reference; the Plan is
// now passed explicitly to prepare() (see the ITensorBuilder::prepare change),
// so the builder no longer retains backend-global state.
-TensorBuilder::TensorBuilder(codegen::Plan &plan) : _plan(plan)
+TensorBuilder::TensorBuilder()
{
  // DO NOTHING
}
_inds.insert(ind.asInt());
}
-void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
+void TensorBuilder::prepare(codegen::Plan &plan,
+ const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
{
assert(_tensors.size() == 0);
::neurun::graph::operand::Index ind{ind_int};
auto tensor = std::make_shared<::arm_compute::CLTensor>();
tensor->allocator()->init(tensor_info_ctx.at(ind.asInt()));
- _plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
+ plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
_tensors[ind.asInt()] = tensor;
}
}
#define __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
#include "backend/ITensorBuilder.h"
-#include "codegen/Plan.h"
#include <unordered_map>
#include <unordered_set>
// Builds and tracks ACL CL tensors for operand indices marked via mark().
// This diff removes the stored Plan reference (_plan); prepare() now receives
// the Plan as an explicit parameter instead.
class TensorBuilder : public ITensorBuilder
{
public:
-  TensorBuilder(codegen::Plan &plan);
+  TensorBuilder();
  // Record an operand index whose CL tensor must be created at prepare() time.
  virtual void mark(const ::neurun::graph::operand::Index &ind) override;
-  virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
+  virtual void prepare(codegen::Plan &plan,
+                       const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
  virtual void allocate(void) override;
  // Returns the CL tensor created for `ind` during prepare() (stored in _tensors).
  std::shared_ptr<::arm_compute::CLTensor> at(const ::neurun::graph::operand::Index &ind);
private:
-  codegen::Plan &_plan;
  std::unordered_set<int> _inds;
  std::unordered_map<int, std::shared_ptr<::arm_compute::CLTensor>> _tensors;
};
namespace cpu
{
// cpu TensorBuilder: same change as the acl_cl builder — the Plan reference
// is removed from the constructor and supplied to prepare() instead.
-TensorBuilder::TensorBuilder(codegen::Plan &plan) : _plan(plan)
+TensorBuilder::TensorBuilder()
{
  // DO NOTHING
}
_inds.insert(ind.asInt());
}
-void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
+void TensorBuilder::prepare(codegen::Plan &plan,
+ const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
{
assert(_tensors.size() == 0);
auto tensor = std::make_shared<operand::Tensor>(tensor_info_ctx.at(ind.asInt()));
// TODO Fix allocation here. When Tensor object is created the memory for tensor is also
// allocated, and this must be fixed.
- _plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
+ plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
_tensors[ind.asInt()] = tensor;
}
}
#include "backend/ITensorBuilder.h"
#include "backend/cpu/operand/Tensor.h"
-#include "codegen/Plan.h"
namespace neurun
{
// CPU-backend tensor builder; mirrors the acl_cl TensorBuilder but produces
// cpu operand::Tensor objects. The stored Plan reference (_plan) is removed;
// prepare() now takes the Plan as a parameter.
class TensorBuilder : public ITensorBuilder
{
public:
-  TensorBuilder(codegen::Plan &plan);
+  TensorBuilder();
  // Record an operand index whose tensor must be created at prepare() time.
  virtual void mark(const ::neurun::graph::operand::Index &ind) override;
-  virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
+  virtual void prepare(codegen::Plan &plan,
+                       const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
  virtual void allocate(void) override;
  // Returns the tensor created for `ind` during prepare() (stored in _tensors).
  std::shared_ptr<operand::Tensor> at(const ::neurun::graph::operand::Index &ind);
private:
-  codegen::Plan &_plan;
  std::unordered_set<int> _inds;
  std::unordered_map<int, std::shared_ptr<operand::Tensor>> _tensors;
};
namespace codegen
{
// NOTE(review): the per-type-index getter methods below are deleted by this
// patch; callers now obtain the initializer/stage generators and tensor
// builder from the Backend object stored in each node's LowerInfo (see the
// Planner and Linear::markTensors changes elsewhere in this patch).
-std::shared_ptr<neurun::backend::IInitializerGenerator>
-BackendResolver::getInitializerGenerator(const std::type_index &type)
-{
-  return _gen_map.at(type).initializer_gen;
-}
-
-std::shared_ptr<neurun::backend::IStageGenerator>
-BackendResolver::getStageGenerator(const std::type_index &type)
-{
-  return _gen_map.at(type).stage_gen;
-}
-
-std::shared_ptr<neurun::backend::ITensorBuilder>
-BackendResolver::getTensorBuilder(const std::type_index &type)
-{
-  return getStageGenerator(type)->tensor_builder();
-}
-
-std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> BackendResolver::getAllTensorBuilders()
-{
-  std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> ret;
-  for (const auto &it : _gen_map)
-  {
-    ret.insert(it.second.stage_gen->tensor_builder());
-  }
-  return ret;
-}
+// NOT IMPLEMENTED
} // namespace neurun
} // namespace codegen
class BackendResolver
{
public:
- BackendResolver(::internal::BackendManager &backend_manager) : _backend_manager(backend_manager)
+ BackendResolver(const neurun::graph::operand::Set &operands)
{
+ _backend_manager = std::make_shared<::internal::BackendManager>(operands);
+
const auto &backend_all_str =
::nnfw::util::EnvVar{std::string("OP_BACKEND_ALLOPS")}.asString("none");
if (backend_all_str.compare("none") != 0)
VERBOSE(BackendResolver) << "Use backend for all ops: " << backend_all_str << std::endl;
#define OP(InternalName, NnApiName) \
{ \
- auto backend = _backend_manager.get(backend_all_str); \
+ auto backend = _backend_manager->get(backend_all_str); \
_gen_map[typeid(graph::operation::InternalName::Node)] = backend; \
}
#include "graph/operation/Op.lst"
{ \
const auto &backend_str = \
::nnfw::util::EnvVar{std::string("OP_BACKEND_") + #NnApiName}.asString("acl_cl"); \
- auto backend = _backend_manager.get(backend_str); \
+ auto backend = _backend_manager->get(backend_str); \
VERBOSE(BackendResolver) << "backend for " << #NnApiName << ": " << backend_str << std::endl; \
_gen_map[typeid(graph::operation::InternalName::Node)] = backend; \
}
}
}
- std::shared_ptr<neurun::backend::IInitializerGenerator>
- getInitializerGenerator(const std::type_index &type);
- std::shared_ptr<neurun::backend::IStageGenerator> getStageGenerator(const std::type_index &type);
- std::shared_ptr<neurun::backend::ITensorBuilder> getTensorBuilder(const std::type_index &type);
- std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> getAllTensorBuilders();
+public:
+ const ::internal::Backend &getBackend(const std::type_index &type) { return _gen_map[type]; }
private:
std::unordered_map<std::type_index, ::internal::Backend> _gen_map;
- ::internal::BackendManager &_backend_manager;
+ std::shared_ptr<::internal::BackendManager> _backend_manager;
};
} // namespace codegen
virtual void addShapeConstr(const ::neurun::graph::operand::Index &ind,
const ::arm_compute::TensorInfo &info) = 0;
+ virtual void addTensorBuilder(std::shared_ptr<backend::ITensorBuilder> tensor_builder) = 0;
virtual void addInitializer(const ::neurun::graph::operand::Index &ind,
const Initializer &initializer) = 0;
virtual void addStage(const Stage &) = 0;
_tensor_info_ctx[ind.asInt()] = info;
}
// Registers a backend tensor builder with the plan. _tensor_builders is a
// std::set, so a builder shared by many nodes is stored once and therefore
// prepared/allocated exactly once in finalize().
+void PlanBuilder::addTensorBuilder(std::shared_ptr<backend::ITensorBuilder> tensor_builder)
+{
+  _tensor_builders.insert(tensor_builder);
+}
+
void PlanBuilder::addInitializer(const ::neurun::graph::operand::Index &ind,
const Initializer &initializer)
{
// Appends a code-generation stage; stages are kept in insertion order.
void PlanBuilder::addStage(const Stage &stage) { _stages.emplace_back(stage); }
-void PlanBuilder::finalize(BackendResolver &backend_resolver)
+void PlanBuilder::finalize()
{
- auto tensor_builders = backend_resolver.getAllTensorBuilders();
-
// Prepare tensors
- for (auto &tensor_builder : tensor_builders)
+ for (auto &tensor_builder : _tensor_builders)
{
- tensor_builder->prepare(_tensor_info_ctx);
+ tensor_builder->prepare(_plan, _tensor_info_ctx);
}
// Process Stage
// TODO Add code for CPU/ACL tensor allocation
// Allocate Tensor Memory for cl_tensors
- for (auto &tensor_builder : tensor_builders)
+ for (auto &tensor_builder : _tensor_builders)
{
tensor_builder->allocate();
}
#include "codegen/Plan.h"
#include "codegen/BackendResolver.h"
#include "backend/IStageGenerator.h"
+#include "backend/ITensorBuilder.h"
namespace neurun
{
const ::arm_compute::TensorInfo &info) override;
public:
+ void addTensorBuilder(std::shared_ptr<backend::ITensorBuilder> tensor_builder) override;
+
+public:
void addInitializer(const ::neurun::graph::operand::Index &ind,
const Initializer &initializer) override;
void addStage(const Stage &stage) override;
public:
- void finalize(BackendResolver &backend_resolver);
+ void finalize();
public:
const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx() { return _tensor_info_ctx; }
private:
std::map<int, ::arm_compute::TensorInfo> _tensor_info_ctx;
std::map<int, Initializer> _initializer_ctx;
+ std::set<std::shared_ptr<backend::ITensorBuilder>> _tensor_builders;
std::vector<Stage> _stages;
};
#include "graph/operand/Set.h"
#include "codegen/IPlanBuilder.h"
#include "codegen/BackendResolver.h"
+#include "graph/operation/LowerInfo.h"
namespace neurun
{
_builder.addShapeConstr(ker_index, ::internal::asTensorInfo(ker_shape));
_builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
+ // backend
+ auto backend = node.lower_info()->backend();
+
// Generate Initializers
- auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
+ auto init_gen = backend.initializer_gen();
_builder.addInitializer(ker_index, init_gen->generateWeight(node));
_builder.addInitializer(bias_index, init_gen->generateBias(node));
// Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ auto stage_gen = backend.stage_gen();
_builder.addStage(stage_gen->generate(node));
+
+ // Generate TensorBuilder
+ auto tensor_builder = backend.tensor_builder();
+ _builder.addTensorBuilder(tensor_builder);
}
void Planner::visit(const graph::operation::MaxPool2D::Implicit::Node &node)
_builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
_builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+ // backend
+ auto backend = node.lower_info()->backend();
+
// Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ auto stage_gen = backend.stage_gen();
_builder.addStage(stage_gen->generate(node));
+
+ // Generate TensorBuilder
+ auto tensor_builder = backend.tensor_builder();
+ _builder.addTensorBuilder(tensor_builder);
}
void Planner::visit(const graph::operation::AvgPool2D::Implicit::Node &node)
_builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
_builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+ // backend
+ auto backend = node.lower_info()->backend();
+
// Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ auto stage_gen = backend.stage_gen();
_builder.addStage(stage_gen->generate(node));
+
+ // Generate TensorBuilder
+ auto tensor_builder = backend.tensor_builder();
+ _builder.addTensorBuilder(tensor_builder);
}
void Planner::visit(const graph::operation::Concat::Node &node)
_builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
}
+ // backend
+ auto backend = node.lower_info()->backend();
+
// Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ auto stage_gen = backend.stage_gen();
_builder.addStage(stage_gen->generate(node));
+
+ // Generate TensorBuilder
+ auto tensor_builder = backend.tensor_builder();
+ _builder.addTensorBuilder(tensor_builder);
}
void Planner::visit(const graph::operation::FullyConnected::Node &node)
::internal::asTensorInfo(num_output /*H*/, input_size /*W*/));
_builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
+ // backend
+ auto backend = node.lower_info()->backend();
+
// Generate Initializers
- auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
+ auto init_gen = backend.initializer_gen();
_builder.addInitializer(weight_index, init_gen->generateWeight(node));
_builder.addInitializer(bias_index, init_gen->generateBias(node));
// Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ auto stage_gen = backend.stage_gen();
_builder.addStage(stage_gen->generate(node));
+
+ // Generate TensorBuilder
+ auto tensor_builder = backend.tensor_builder();
+ _builder.addTensorBuilder(tensor_builder);
}
void Planner::visit(const graph::operation::Reshape::Node &node)
_builder.addShapeConstr(output_index, ::internal::asTensorInfo(out_size));
_builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
+ // backend
+ auto backend = node.lower_info()->backend();
+
// Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ auto stage_gen = backend.stage_gen();
_builder.addStage(stage_gen->generate(node));
+
+ // Generate TensorBuilder
+ auto tensor_builder = backend.tensor_builder();
+ _builder.addTensorBuilder(tensor_builder);
}
void Planner::visit(const graph::operation::Softmax::Node &node)
_builder.addShapeConstr(output_index, ::internal::asTensorInfo(len));
_builder.addShapeConstr(input_index, ::internal::asTensorInfo(len));
+ // backend
+ auto backend = node.lower_info()->backend();
+
// Generate Stage
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
+ auto stage_gen = backend.stage_gen();
_builder.addStage(stage_gen->generate(node));
+
+ // Generate TensorBuilder
+ auto tensor_builder = backend.tensor_builder();
+ _builder.addTensorBuilder(tensor_builder);
}
void Planner::visit(const graph::operation::NOP::Node & /* node */)
// Node visitor that feeds shape constraints, initializers, stages and tensor
// builders into an IPlanBuilder. This diff drops the BackendResolver
// reference: each visit() now reads the backend from the node's LowerInfo.
class Planner : public graph::operation::NodeVisitor
{
public:
-  Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder,
-          neurun::codegen::BackendResolver &backend_resolver)
-      : _ctx{ctx}, _builder{builder}, _backend_resolver(backend_resolver)
+  Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder)
+      : _ctx{ctx}, _builder{builder}
  {
  }
private:
  const neurun::graph::operand::Set &_ctx;
  neurun::codegen::IPlanBuilder &_builder;
-  neurun::codegen::BackendResolver &_backend_resolver;
};
} // namespace codegen
// Dump ops
linear->accept(neurun::codegen::Dumper{});
- ::internal::BackendManager backend_manager{plan};
- neurun::codegen::BackendResolver backend_resolver{backend_manager};
neurun::codegen::PlanBuilder plan_builder{plan};
- linear->markTensors(backend_resolver);
+ linear->markTensors();
- linear->accept(neurun::codegen::Planner{operands, plan_builder, backend_resolver});
+ linear->accept(neurun::codegen::Planner{operands, plan_builder});
// TODO Add optimization passes
- plan_builder.finalize(backend_resolver);
+ plan_builder.finalize();
return ANEURALNETWORKS_NO_ERROR;
}
#include "nnfw/std/memory.h"
#include "linear/Linear.h"
#include "operation/LowerInfo.h"
+#include "codegen/BackendResolver.h"
namespace neurun
{
// Lower
{
+ auto _backend_resolver = neurun::codegen::BackendResolver(_operands);
_operations.iterate([&](const operation::Index &, operation::Node &node) {
- // TODO Update backend id accordingly. Currently "acl_cl" by default
- node.lower_info(nnfw::make_unique<operation::LowerInfo>(std::string("acl_cl")));
+ auto backend = _backend_resolver.getBackend(typeid(node));
+ node.lower_info(nnfw::make_unique<operation::LowerInfo>(backend));
});
}
namespace operation
{
// LowerInfo now stores the resolved internal::Backend object directly instead
// of a backend-id string, so downstream code can fetch generators/builders
// without another resolver lookup.
-LowerInfo::LowerInfo(const std::string &backend_id) : _backend_id(backend_id)
+LowerInfo::LowerInfo(const internal::Backend &backend) : _backend(backend)
{
  // DO NOTHING
}
#include <string>
+#include "internal/BackendManager.h"
+
namespace neurun
{
namespace graph
// Per-operation lowering information. Replaces the backend-id string with the
// resolved internal::Backend itself (stored by value, copied from the
// resolver's entry).
class LowerInfo
{
public:
-  LowerInfo(const std::string &backend_id);
-  const std::string &backend_id() const { return _backend_id; }
+  LowerInfo(const internal::Backend &backend);
+  const internal::Backend &backend() const { return _backend; }
private:
-  const std::string _backend_id;
+  internal::Backend _backend;
};
} // namespace operation
// Backend constructor: member initializers renamed to the private
// _initializer_gen/_stage_gen members introduced by this patch. The backend
// initializer is run eagerly here, at construction time.
Backend::Backend(const std::shared_ptr<neurun::backend::IBackendInitializer> &backend_initializer,
                 const std::shared_ptr<neurun::backend::IInitializerGenerator> &initializer_gen,
                 const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen)
-    : initializer_gen(initializer_gen), stage_gen(stage_gen)
+    : _initializer_gen(initializer_gen), _stage_gen(stage_gen)
{
  backend_initializer->initialize();
}
-BackendManager::BackendManager(neurun::codegen::Plan &plan) : _plan(plan)
+const std::shared_ptr<neurun::backend::IInitializerGenerator> Backend::initializer_gen() const
{
- const auto &operands = _plan.model().operands();
+ return _initializer_gen;
+}
+
// Accessor for the stage generator this Backend was constructed with.
+const std::shared_ptr<neurun::backend::IStageGenerator> Backend::stage_gen() const
+{
+  return _stage_gen;
+}
// The tensor builder is owned by the stage generator; this simply forwards.
+const std::shared_ptr<neurun::backend::ITensorBuilder> Backend::tensor_builder() const
+{
+  return _stage_gen->tensor_builder();
+}
+
+BackendManager::BackendManager(const neurun::graph::operand::Set &operands)
+{
// Add arm_compute backend
{
using namespace ::neurun::backend::acl_cl;
auto acl_backend_initializer = std::make_shared<BackendInitializer>();
- auto acl_tensor_builder = std::make_shared<TensorBuilder>(_plan);
+ auto acl_tensor_builder = std::make_shared<TensorBuilder>();
auto acl_initializer_gen = std::make_shared<InitializerGenerator>(operands);
auto acl_stage_gen = std::make_shared<StageGenerator>(operands, acl_tensor_builder);
{
using namespace ::neurun::backend::cpu;
auto cpu_backend_initializer = std::make_shared<BackendInitializer>();
- auto cpu_tensor_builder = std::make_shared<TensorBuilder>(_plan);
+ auto cpu_tensor_builder = std::make_shared<TensorBuilder>();
auto cpu_initializer_gen = std::make_shared<InitializerGenerator>(operands);
auto cpu_stage_gen = std::make_shared<StageGenerator>(operands, cpu_tensor_builder);
#include <memory>
-#include "codegen/Plan.h"
+#include "graph/operand/Set.h"
namespace neurun
{
namespace internal
{
// Backend aggregates the generators for one execution backend. This diff
// converts it from an open struct with public shared_ptr fields into a class
// with private _initializer_gen/_stage_gen members and const accessors
// (including tensor_builder(), which forwards to the stage generator).
-struct Backend
+class Backend
{
-  std::shared_ptr<neurun::backend::IInitializerGenerator> initializer_gen;
-  std::shared_ptr<neurun::backend::IStageGenerator> stage_gen;
-
+public:
  Backend(const std::shared_ptr<neurun::backend::IBackendInitializer> &backend_initializer,
          const std::shared_ptr<neurun::backend::IInitializerGenerator> &initializer_gen,
          const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen);
  // Default ctor leaves the generators null (e.g. for map default-insertion).
-  Backend(void) : initializer_gen(nullptr), stage_gen(nullptr)
+  Backend(void) : _initializer_gen(nullptr), _stage_gen(nullptr)
  {
    // DO NOTHING
  }
+
+public:
+  const std::shared_ptr<neurun::backend::IInitializerGenerator> initializer_gen() const;
+  const std::shared_ptr<neurun::backend::IStageGenerator> stage_gen() const;
+  const std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder() const;
+
+private:
+  std::shared_ptr<neurun::backend::IInitializerGenerator> _initializer_gen;
+  std::shared_ptr<neurun::backend::IStageGenerator> _stage_gen;
};
// Maps backend-name strings (e.g. "acl_cl", "cpu") to Backend instances.
// This diff decouples it from the Plan: it is now constructed from the
// operand set alone, so the stored Plan reference is removed.
class BackendManager
{
public:
-  BackendManager(neurun::codegen::Plan &plan);
+  BackendManager(const neurun::graph::operand::Set &operands);
  Backend get(const std::string &key);
private:
-  neurun::codegen::Plan &_plan;
  std::map<std::string, Backend> _gen_map;
};
#include "graph/Graph.h"
#include "codegen/BackendResolver.h"
+#include "graph/operation/LowerInfo.h"
namespace neurun
{
}
}
-void Linear::markTensors(neurun::codegen::BackendResolver &resolver) const
+void Linear::markTensors() const
{
for (const auto op : _operations)
{
- auto tensor_builder = resolver.getTensorBuilder(typeid(*op));
+ const auto tensor_builder = op->lower_info()->backend().stage_gen()->tensor_builder();
for (const auto &ind : op->getInputs())
{
tensor_builder->mark(ind);
void accept(graph::operation::NodeVisitor &&visitor) const;
// TODO Remove this since tensor marking will be replaced with another way
- virtual void markTensors(neurun::codegen::BackendResolver &) const;
+ virtual void markTensors() const;
public:
private: