From 462175abb3a4bf8a42666bb1b457e5e0bb037ff9 Mon Sep 17 00:00:00 2001
From: 이한종/동작제어Lab(SR)/Engineer/삼성전자
Date: Mon, 30 Jul 2018 19:48:50 +0900
Subject: [PATCH] [neurun] Change namespace for acl_cl backend (#2106)

Change namespace `internal::arm_compute` to `neurun::backend::acl_cl`.

Signed-off-by: Hanjoung Lee
---
 .../src/backend/acl_cl/InitializerGenerator.cc     | 11 +++---
 .../src/backend/acl_cl/InitializerGenerator.h      | 17 +++++----
 .../neurun/src/backend/acl_cl/StageGenerator.cc    | 42 ++++++++++++----------
 .../neurun/src/backend/acl_cl/StageGenerator.h     | 26 ++++++++------
 .../neurun/src/backend/acl_cl/TensorBuilder.cc     | 11 +++---
 runtimes/neurun/src/backend/acl_cl/TensorBuilder.h | 18 ++++++----
 runtimes/neurun/src/internal/BackendManager.cc     | 10 +++---
 7 files changed, 79 insertions(+), 56 deletions(-)

diff --git a/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc
index b70c07c..e27e4f9 100644
--- a/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc
+++ b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc
@@ -6,9 +6,11 @@
 #include "internal/nnapi/kernel/Reader.h"
 #include "util/kernel/IndexIterator.h"
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 InitializerGenerator::InitializerGenerator(const ::internal::tflite::operand::Set &ctx) : _ctx(ctx)
@@ -124,5 +126,6 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected:
   };
 }
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.h b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.h
index 3bf610d..6884c90 100644
--- a/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.h
+++ b/runtimes/neurun/src/backend/acl_cl/InitializerGenerator.h
@@ -1,13 +1,15 @@
-#ifndef __INTERNAL_ARM_COMPUTE_INITIALIZER_GENERATOR_H__
-#define __INTERNAL_ARM_COMPUTE_INITIALIZER_GENERATOR_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
+#define __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
 
 #include "internal/IInitializerGenerator.h"
 
 #include "internal/Model.h"
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 class InitializerGenerator : public ::internal::IInitializerGenerator
@@ -25,7 +27,8 @@ private:
   const ::internal::tflite::operand::Set &_ctx;
 };
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_ARM_COMPUTE_INITIALIZER_GENERATOR_H__
+#endif // __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
index 0c52fb6..c69ad69 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
@@ -32,9 +32,11 @@ template <typename T> std::unique_ptr<T> make_layer(void) { return std::unique_p
                                       ::arm_compute::DimensionRoundingType::FLOOR};
 }
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 //
@@ -96,7 +98,7 @@ void ActivationBuilder::append(FuseCode code, ::arm_compute::ICLTensor *ifm_allo
 //
 StageGenerator::StageGenerator(
     const ::internal::tflite::operand::Set &ctx,
-    const std::shared_ptr<::internal::arm_compute::TensorBuilder> &tensor_builder,
+    const std::shared_ptr<TensorBuilder> &tensor_builder,
     const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder)
     : _ctx(ctx), _tensor_builder(tensor_builder), _common_tensor_builder(common_tensor_builder)
 {
@@ -127,7 +129,7 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N
   assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
          (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
-  Stride stride;
+  ::internal::Stride stride;
 
   stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
   stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
@@ -140,8 +142,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N
     int ker_index;
     int bias_index;
 
-    Padding padding;
-    Stride stride;
+    ::internal::Padding padding;
+    ::internal::Stride stride;
 
     FuseCode activation;
   };
@@ -154,9 +156,10 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N
   param.bias_index = bias_index.asInt();
 
   param.stride = stride;
-  param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
-                      : valid_padding();
+  param.padding =
+      (padding_type == ANEURALNETWORKS_PADDING_SAME)
+          ? ::internal::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
+          : ::internal::valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -214,8 +217,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit
     uint32_t kw;
     uint32_t kh;
 
-    Padding padding;
-    Stride stride;
+    ::internal::Padding padding;
+    ::internal::Stride stride;
 
     // TODO Add 'activation' field
   };
@@ -232,8 +235,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
-                      : valid_padding();
+                      ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      : ::internal::valid_padding();
 
   VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
   VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
@@ -303,8 +306,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit
     uint32_t kw;
     uint32_t kh;
 
-    Padding padding;
-    Stride stride;
+    ::internal::Padding padding;
+    ::internal::Stride stride;
 
     // TODO Add 'activation' field
   };
@@ -321,8 +324,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
-                      : valid_padding();
+                      ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      : ::internal::valid_padding();
 
   VERBOSE(AvgPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
   VERBOSE(AvgPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
@@ -527,5 +530,6 @@ Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::AclT
   throw std::runtime_error("NYI - StageGenerator::generate(TensorConvert::AclToCommon)");
 }
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.h b/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
index cbdf1e7..6f1dadf 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
@@ -1,5 +1,5 @@
-#ifndef __INTERNAL_ARM_COMPUTE_STAGE_GENERATOR_H__
-#define __INTERNAL_ARM_COMPUTE_STAGE_GENERATOR_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
+#define __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
 
 #include "internal/IStageGenerator.h"
 
@@ -7,19 +7,24 @@
 #include "backend/acl_cl/TensorBuilder.h"
 #include "internal/common/TensorBuilder.h"
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 class StageGenerator : public ::internal::IStageGenerator
 {
 public:
   StageGenerator(const ::internal::tflite::operand::Set &ctx,
-                 const std::shared_ptr<::internal::arm_compute::TensorBuilder> &tensor_builder,
+                 const std::shared_ptr<TensorBuilder> &tensor_builder,
                  const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder);
 
-  virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+  virtual std::shared_ptr<::internal::ITensorBuilder> tensor_builder() override
+  {
+    return _tensor_builder;
+  }
 
   virtual Stage generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
   virtual Stage generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
@@ -39,11 +44,12 @@ public:
 
 private:
   const ::internal::tflite::operand::Set &_ctx;
-  std::shared_ptr<::internal::arm_compute::TensorBuilder> _tensor_builder;
+  std::shared_ptr<TensorBuilder> _tensor_builder;
   std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
 };
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_ARM_COMPUTE_STAGE_GENERATOR_H__
+#endif // __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
index 78d58c6..3161503 100644
--- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
+++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
@@ -4,9 +4,11 @@
 
 #include "internal/arm_compute.h"
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 TensorBuilder::TensorBuilder(::internal::arm_compute::Plan &plan) : _plan(plan)
@@ -55,5 +57,6 @@ TensorBuilder::at(const ::internal::tflite::operand::Index &ind)
   return _tensors.at(ind.asInt());
 }
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
index ddc4921..63b13d9 100644
--- a/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
+++ b/runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
@@ -1,16 +1,19 @@
-#ifndef __INTERNAL_ARM_COMPUTE_TENSOR_BUILDER_H__
-#define __INTERNAL_ARM_COMPUTE_TENSOR_BUILDER_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
 
 #include "internal/ITensorBuilder.h"
+#include "internal/arm_compute.h"
 
 #include
 #include
 #include
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 class Plan;
 
@@ -32,7 +35,8 @@ private:
   std::unordered_map<int, std::shared_ptr<::arm_compute::CLTensor>> _tensors;
 };
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_ARM_COMPUTE_TENSOR_BUILDER_H__
+#endif // __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
diff --git a/runtimes/neurun/src/internal/BackendManager.cc b/runtimes/neurun/src/internal/BackendManager.cc
index 52b2874..9e31ed4 100644
--- a/runtimes/neurun/src/internal/BackendManager.cc
+++ b/runtimes/neurun/src/internal/BackendManager.cc
@@ -18,11 +18,11 @@ BackendManager::BackendManager(::internal::arm_compute::Plan &plan) : _plan(plan
 
   // Add arm_compute backend
   {
-    auto acl_tensor_builder = std::make_shared<::internal::arm_compute::TensorBuilder>(_plan);
-    auto acl_initializer_gen =
-        std::make_shared<::internal::arm_compute::InitializerGenerator>(operands);
-    auto acl_stage_gen = std::make_shared<::internal::arm_compute::StageGenerator>(
-        operands, acl_tensor_builder, _common_tensor_builder);
+    using namespace ::neurun::backend::acl_cl;
+    auto acl_tensor_builder = std::make_shared<TensorBuilder>(_plan);
+    auto acl_initializer_gen = std::make_shared<InitializerGenerator>(operands);
+    auto acl_stage_gen =
+        std::make_shared<StageGenerator>(operands, acl_tensor_builder, _common_tensor_builder);
 
     // TODO Do not use magic string for backend id
     _gen_map["acl_cl"] = {acl_initializer_gen, acl_stage_gen};
-- 
2.7.4