--- /dev/null
+#ifndef __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
+#define __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
+
+#include <set>
+#include <typeindex>
+
+#include "logging.h"
+#include "util/EnvVar.h"
+#include "internal/BackendManager.h"
+#include "backend/IInitializerGenerator.h"
+#include "backend/IStageGenerator.h"
+#include "internal/common/TensorBuilder.h"
+
+namespace neurun
+{
+namespace codegen
+{
+
+class BackendResolver
+{
+public:
+ BackendResolver(::internal::BackendManager &backend_manager) : _backend_manager(backend_manager)
+ {
+ const auto &backend_all_str =
+ ::nnfw::util::EnvVar{std::string("OP_BACKEND_ALLOPS")}.asString("none");
+ if (backend_all_str.compare("none") != 0)
+ {
+ VERBOSE(BackendResolver) << "Use backend for all ops: " << backend_all_str << std::endl;
+#define OP(InternalName, NnApiName) \
+ { \
+ auto backend = _backend_manager.get(backend_all_str); \
+ _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend; \
+ }
+#include "internal/op/Op.lst"
+#undef OP
+ }
+ else
+ {
+#define OP(InternalName, NnApiName) \
+ { \
+ const auto &backend_str = \
+ ::nnfw::util::EnvVar{std::string("OP_BACKEND_") + #NnApiName}.asString("acl_cl"); \
+ auto backend = _backend_manager.get(backend_str); \
+ VERBOSE(BackendResolver) << "backend for " << #NnApiName << ": " << backend_str << std::endl; \
+ _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend; \
+ }
+
+#include "internal/op/Op.lst"
+#undef OP
+ }
+
+  // TODO: This is just a workaround. Its logic should be changed.
+ _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuFromCommon::Node)] =
+ backend_manager.get("cpu");
+ _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuToCommon::Node)] =
+ backend_manager.get("cpu");
+ _gen_map[typeid(::internal::tflite::op::TensorConvert::AclFromCommon::Node)] =
+ backend_manager.get("acl_cl");
+ _gen_map[typeid(::internal::tflite::op::TensorConvert::AclToCommon::Node)] =
+ backend_manager.get("acl_cl");
+ }
+
+ std::shared_ptr<neurun::backend::IInitializerGenerator>
+ getInitializerGenerator(const std::type_index &type);
+ std::shared_ptr<neurun::backend::IStageGenerator> getStageGenerator(const std::type_index &type);
+ std::shared_ptr<neurun::backend::ITensorBuilder> getTensorBuilder(const std::type_index &type);
+ std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> getAllTensorBuilders();
+ std::shared_ptr<::internal::common::TensorBuilder> getCommonTensorBuilder();
+
+private:
+ std::unordered_map<std::type_index, ::internal::Backend> _gen_map;
+ ::internal::BackendManager &_backend_manager;
+};
+
+} // namespace codegen
+} // namespace neurun
+
+#endif // __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
#include <NeuralNetworks.h>
#include <algorithm>
-#include <typeindex>
#include <arm_compute/core/CL/ICLTensor.h>
#include "backend/IStageGenerator.h"
#include "internal/common/Tensor.h"
#include "internal/common/TensorBuilder.h"
-#include "util/EnvVar.h"
#include "compilation.h"
#include "model.h"
using TensorSetter = std::function<void(int, const ::arm_compute::TensorInfo &)>;
#include "codegen/IPlanBuilder.h"
-#include "internal/BackendManager.h"
-
-class BackendResolver
-{
-public:
- BackendResolver(::internal::BackendManager &backend_manager) : _backend_manager(backend_manager)
- {
- const auto &backend_all_str =
- ::nnfw::util::EnvVar{std::string("OP_BACKEND_ALLOPS")}.asString("none");
- if (backend_all_str.compare("none") != 0)
- {
- VERBOSE(BackendResolver) << "Use backend for all ops: " << backend_all_str << std::endl;
-#define OP(InternalName, NnApiName) \
- { \
- auto backend = _backend_manager.get(backend_all_str); \
- _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend; \
- }
-#include "internal/op/Op.lst"
-#undef OP
- }
- else
- {
-#define OP(InternalName, NnApiName) \
- { \
- const auto &backend_str = \
- ::nnfw::util::EnvVar{std::string("OP_BACKEND_") + #NnApiName}.asString("acl_cl"); \
- auto backend = _backend_manager.get(backend_str); \
- VERBOSE(BackendResolver) << "backend for " << #NnApiName << ": " << backend_str << std::endl; \
- _gen_map[typeid(::internal::tflite::op::InternalName::Node)] = backend; \
- }
-
-#include "internal/op/Op.lst"
-#undef OP
- }
-
- // TODO : It's just workaround. It's logic should be changed.
- _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuFromCommon::Node)] =
- backend_manager.get("cpu");
- _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuToCommon::Node)] =
- backend_manager.get("cpu");
- _gen_map[typeid(::internal::tflite::op::TensorConvert::AclFromCommon::Node)] =
- backend_manager.get("acl_cl");
- _gen_map[typeid(::internal::tflite::op::TensorConvert::AclToCommon::Node)] =
- backend_manager.get("acl_cl");
- }
-
- std::shared_ptr<neurun::backend::IInitializerGenerator>
- getInitializerGenerator(const std::type_index &type);
- std::shared_ptr<neurun::backend::IStageGenerator> getStageGenerator(const std::type_index &type);
- std::shared_ptr<neurun::backend::ITensorBuilder> getTensorBuilder(const std::type_index &type);
- std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> getAllTensorBuilders();
- std::shared_ptr<::internal::common::TensorBuilder> getCommonTensorBuilder();
-
-private:
- std::unordered_map<std::type_index, ::internal::Backend> _gen_map;
- ::internal::BackendManager &_backend_manager;
-};
-
-std::shared_ptr<neurun::backend::IInitializerGenerator>
-BackendResolver::getInitializerGenerator(const std::type_index &type)
-{
- return _gen_map.at(type).initializer_gen;
-}
-
-std::shared_ptr<neurun::backend::IStageGenerator>
-BackendResolver::getStageGenerator(const std::type_index &type)
-{
- return _gen_map.at(type).stage_gen;
-}
-
-std::shared_ptr<neurun::backend::ITensorBuilder>
-BackendResolver::getTensorBuilder(const std::type_index &type)
-{
- return getStageGenerator(type)->tensor_builder();
-}
-
-std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> BackendResolver::getAllTensorBuilders()
-{
- std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> ret;
- for (const auto &it : _gen_map)
- {
- ret.insert(it.second.stage_gen->tensor_builder());
- }
- return ret;
-}
-
-std::shared_ptr<::internal::common::TensorBuilder> BackendResolver::getCommonTensorBuilder()
-{
- return _backend_manager.getCommonTensorBuilder();
-}
+#include "codegen/BackendResolver.h"
class Planner : public ::internal::tflite::op::NodeVisitor
{
public:
Planner(const neurun::graph::operand::Set &ctx, neurun::codegen::IPlanBuilder &builder,
- BackendResolver &backend_resolver)
+ neurun::codegen::BackendResolver &backend_resolver)
: _ctx{ctx}, _builder{builder}, _backend_resolver(backend_resolver)
{
}
private:
const neurun::graph::operand::Set &_ctx;
neurun::codegen::IPlanBuilder &_builder;
- BackendResolver &_backend_resolver;
+ neurun::codegen::BackendResolver &_backend_resolver;
};
void Planner::visit(const ::internal::tflite::op::Conv2D::implicit::Node &node)
void addStage(const Stage &stage) override;
public:
- void finalize(BackendResolver &backend_resolver);
+ void finalize(neurun::codegen::BackendResolver &backend_resolver);
public:
const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx() { return _tensor_info_ctx; }
void PlanBuilder::addStage(const Stage &stage) { _stages.emplace_back(stage); }
-void PlanBuilder::finalize(BackendResolver &backend_resolver)
+void PlanBuilder::finalize(neurun::codegen::BackendResolver &backend_resolver)
{
auto tensor_builders = backend_resolver.getAllTensorBuilders();
auto common_tensor_builder = backend_resolver.getCommonTensorBuilder();
}
::internal::BackendManager backend_manager{plan};
- BackendResolver backend_resolver{backend_manager};
+ neurun::codegen::BackendResolver backend_resolver{backend_manager};
PlanBuilder plan_builder{plan};
for (uint32_t n = 0; n < operations.size(); ++n)