[neurun] Change namespace for acl_cl backend (#2106)
author: 이한종/동작제어Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Mon, 30 Jul 2018 10:48:50 +0000 (19:48 +0900)
committer: 이춘석/동작제어Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Mon, 30 Jul 2018 10:48:50 +0000 (19:48 +0900)
Change namespace `internal::arm_compute` to `neurun::backend::acl_cl`.

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc
runtimes/neurun/src/backend/acl_cl/InitializerGenerator.h
runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
runtimes/neurun/src/backend/acl_cl/StageGenerator.h
runtimes/neurun/src/backend/acl_cl/TensorBuilder.cc
runtimes/neurun/src/backend/acl_cl/TensorBuilder.h
runtimes/neurun/src/internal/BackendManager.cc

index b70c07c..e27e4f9 100644 (file)
@@ -6,9 +6,11 @@
 #include "internal/nnapi/kernel/Reader.h"
 #include "util/kernel/IndexIterator.h"
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 InitializerGenerator::InitializerGenerator(const ::internal::tflite::operand::Set &ctx) : _ctx(ctx)
@@ -124,5 +126,6 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected:
   };
 }
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
index 3bf610d..6884c90 100644 (file)
@@ -1,13 +1,15 @@
-#ifndef __INTERNAL_ARM_COMPUTE_INITIALIZER_GENERATOR_H__
-#define __INTERNAL_ARM_COMPUTE_INITIALIZER_GENERATOR_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
+#define __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
 
 #include "internal/IInitializerGenerator.h"
 
 #include "internal/Model.h"
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 class InitializerGenerator : public ::internal::IInitializerGenerator
@@ -25,7 +27,8 @@ private:
   const ::internal::tflite::operand::Set &_ctx;
 };
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_ARM_COMPUTE_INITIALIZER_GENERATOR_H__
+#endif // __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
index 0c52fb6..c69ad69 100644 (file)
@@ -32,9 +32,11 @@ template <typename T> std::unique_ptr<T> make_layer(void) { return std::unique_p
                                       ::arm_compute::DimensionRoundingType::FLOOR};
 }
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 //
@@ -96,7 +98,7 @@ void ActivationBuilder::append(FuseCode code, ::arm_compute::ICLTensor *ifm_allo
 //
 StageGenerator::StageGenerator(
     const ::internal::tflite::operand::Set &ctx,
-    const std::shared_ptr<::internal::arm_compute::TensorBuilder> &tensor_builder,
+    const std::shared_ptr<TensorBuilder> &tensor_builder,
     const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder)
     : _ctx(ctx), _tensor_builder(tensor_builder), _common_tensor_builder(common_tensor_builder)
 {
@@ -127,7 +129,7 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N
   assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
          (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
-  Stride stride;
+  ::internal::Stride stride;
 
   stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
   stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
@@ -140,8 +142,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N
     int ker_index;
     int bias_index;
 
-    Padding padding;
-    Stride stride;
+    ::internal::Padding padding;
+    ::internal::Stride stride;
 
     FuseCode activation;
   };
@@ -154,9 +156,10 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N
   param.bias_index = bias_index.asInt();
 
   param.stride = stride;
-  param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
-                      : valid_padding();
+  param.padding =
+      (padding_type == ANEURALNETWORKS_PADDING_SAME)
+          ? ::internal::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
+          : ::internal::valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -214,8 +217,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit
     uint32_t kw;
     uint32_t kh;
 
-    Padding padding;
-    Stride stride;
+    ::internal::Padding padding;
+    ::internal::Stride stride;
 
     // TODO Add 'activation' field
   };
@@ -232,8 +235,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
-                      : valid_padding();
+                      ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      : ::internal::valid_padding();
 
   VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
   VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
@@ -303,8 +306,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit
     uint32_t kw;
     uint32_t kh;
 
-    Padding padding;
-    Stride stride;
+    ::internal::Padding padding;
+    ::internal::Stride stride;
 
     // TODO Add 'activation' field
   };
@@ -321,8 +324,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
-                      : valid_padding();
+                      ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      : ::internal::valid_padding();
 
   VERBOSE(AvgPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
   VERBOSE(AvgPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
@@ -527,5 +530,6 @@ Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::AclT
   throw std::runtime_error("NYI - StageGenerator::generate(TensorConvert::AclToCommon)");
 }
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
index cbdf1e7..6f1dadf 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __INTERNAL_ARM_COMPUTE_STAGE_GENERATOR_H__
-#define __INTERNAL_ARM_COMPUTE_STAGE_GENERATOR_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
+#define __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
 
 #include "internal/IStageGenerator.h"
 
@@ -7,19 +7,24 @@
 #include "backend/acl_cl/TensorBuilder.h"
 #include "internal/common/TensorBuilder.h"
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 class StageGenerator : public ::internal::IStageGenerator
 {
 public:
   StageGenerator(const ::internal::tflite::operand::Set &ctx,
-                 const std::shared_ptr<::internal::arm_compute::TensorBuilder> &tensor_builder,
+                 const std::shared_ptr<TensorBuilder> &tensor_builder,
                  const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder);
 
-  virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+  virtual std::shared_ptr<::internal::ITensorBuilder> tensor_builder() override
+  {
+    return _tensor_builder;
+  }
 
   virtual Stage generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
   virtual Stage generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
@@ -39,11 +44,12 @@ public:
 
 private:
   const ::internal::tflite::operand::Set &_ctx;
-  std::shared_ptr<::internal::arm_compute::TensorBuilder> _tensor_builder;
+  std::shared_ptr<TensorBuilder> _tensor_builder;
   std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
 };
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_ARM_COMPUTE_STAGE_GENERATOR_H__
+#endif // __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
index 78d58c6..3161503 100644 (file)
@@ -4,9 +4,11 @@
 
 #include "internal/arm_compute.h"
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 TensorBuilder::TensorBuilder(::internal::arm_compute::Plan &plan) : _plan(plan)
@@ -55,5 +57,6 @@ TensorBuilder::at(const ::internal::tflite::operand::Index &ind)
   return _tensors.at(ind.asInt());
 }
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
index ddc4921..63b13d9 100644 (file)
@@ -1,16 +1,19 @@
-#ifndef __INTERNAL_ARM_COMPUTE_TENSOR_BUILDER_H__
-#define __INTERNAL_ARM_COMPUTE_TENSOR_BUILDER_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
 
 #include "internal/ITensorBuilder.h"
+#include "internal/arm_compute.h"
 
 #include <unordered_map>
 #include <unordered_set>
 
 #include <arm_compute/runtime/CL/CLTensor.h>
 
-namespace internal
+namespace neurun
 {
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
 {
 
 class Plan;
@@ -32,7 +35,8 @@ private:
   std::unordered_map<int, std::shared_ptr<::arm_compute::CLTensor>> _tensors;
 };
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_ARM_COMPUTE_TENSOR_BUILDER_H__
+#endif // __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
index 52b2874..9e31ed4 100644 (file)
@@ -18,11 +18,11 @@ BackendManager::BackendManager(::internal::arm_compute::Plan &plan) : _plan(plan
 
   // Add arm_compute backend
   {
-    auto acl_tensor_builder = std::make_shared<::internal::arm_compute::TensorBuilder>(_plan);
-    auto acl_initializer_gen =
-        std::make_shared<::internal::arm_compute::InitializerGenerator>(operands);
-    auto acl_stage_gen = std::make_shared<::internal::arm_compute::StageGenerator>(
-        operands, acl_tensor_builder, _common_tensor_builder);
+    using namespace ::neurun::backend::acl_cl;
+    auto acl_tensor_builder = std::make_shared<TensorBuilder>(_plan);
+    auto acl_initializer_gen = std::make_shared<InitializerGenerator>(operands);
+    auto acl_stage_gen =
+        std::make_shared<StageGenerator>(operands, acl_tensor_builder, _common_tensor_builder);
 
     // TODO Do not use magic string for backend id
     _gen_map["acl_cl"] = {acl_initializer_gen, acl_stage_gen};