[neurun] Change namespace of cpu backend (#2123)
author이한종/동작제어Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Tue, 31 Jul 2018 09:45:45 +0000 (18:45 +0900)
committer박세희/동작제어Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Tue, 31 Jul 2018 09:45:45 +0000 (18:45 +0900)
Change namespace `internal::cpu` to `neurun::backend::cpu`.

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
runtimes/neurun/src/backend/cpu/InitializerGenerator.cc
runtimes/neurun/src/backend/cpu/InitializerGenerator.h
runtimes/neurun/src/backend/cpu/StageGenerator.cc
runtimes/neurun/src/backend/cpu/StageGenerator.h
runtimes/neurun/src/backend/cpu/TensorBuilder.cc
runtimes/neurun/src/backend/cpu/TensorBuilder.h
runtimes/neurun/src/internal/BackendManager.cc

index 23a5329..2af84a2 100644 (file)
@@ -6,7 +6,9 @@
 
 #include "NeuralNetworks.h"
 
-namespace internal
+namespace neurun
+{
+namespace backend
 {
 namespace cpu
 {
@@ -188,5 +190,6 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected:
   }
 }
 
-} // namespace arm_compute
-} // namespace internal
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
index be18ae3..2ce8e4a 100644 (file)
@@ -1,11 +1,13 @@
-#ifndef __INTERNAL_CPU_INITIALIZER_GENERATOR_H__
-#define __INTERNAL_CPU_INITIALIZER_GENERATOR_H__
+#ifndef __NEURUN_BACKEND_CPU_INITIALIZER_GENERATOR_H__
+#define __NEURUN_BACKEND_CPU_INITIALIZER_GENERATOR_H__
 
 #include "internal/IInitializerGenerator.h"
 
 #include "internal/Model.h"
 
-namespace internal
+namespace neurun
+{
+namespace backend
 {
 namespace cpu
 {
@@ -26,6 +28,7 @@ private:
 };
 
 } // namespace cpu
-} // namespace internal
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_CPU_INITIALIZER_GENERATOR_H__
+#endif // __NEURUN_BACKEND_CPU_INITIALIZER_GENERATOR_H__
index f759f45..769c3fb 100644 (file)
 
 #include "logging.h"
 
-namespace internal
+namespace neurun
+{
+namespace backend
 {
 namespace cpu
 {
 
 StageGenerator::StageGenerator(
     const ::internal::tflite::operand::Set &operand_ctx,
-    const std::shared_ptr<::internal::cpu::TensorBuilder> &tensor_builder,
+    const std::shared_ptr<TensorBuilder> &tensor_builder,
     const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder)
     : _ctx(operand_ctx), _tensor_builder(tensor_builder),
       _common_tensor_builder(common_tensor_builder)
@@ -51,7 +53,7 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N
   assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
          (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
-  Stride stride;
+  ::internal::Stride stride;
 
   stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
   stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
@@ -69,8 +71,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N
     ::internal::tflite::operand::Shape ker_shape{1};
     ::internal::tflite::operand::Shape bias_shape{1};
 
-    Padding padding;
-    Stride stride;
+    ::internal::Padding padding;
+    ::internal::Stride stride;
 
     FuseCode activation;
   };
@@ -88,11 +90,11 @@ Stage StageGenerator::generate(const ::internal::tflite::op::Conv2D::implicit::N
   param.bias_shape = _ctx.at(bias_index).shape();
 
   param.stride = stride;
-  param.padding =
-      (padding_type == ANEURALNETWORKS_PADDING_SAME)
-          ? same_padding(param.ifm_shape.asFeature(), param.ofm_shape.asFeature(), stride,
-                         param.ker_shape.asKernel().W, param.ker_shape.asKernel().H)
-          : valid_padding();
+  param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
+                      ? ::internal::same_padding(
+                            param.ifm_shape.asFeature(), param.ofm_shape.asFeature(), stride,
+                            param.ker_shape.asKernel().W, param.ker_shape.asKernel().H)
+                      : ::internal::valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -153,8 +155,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit
     ::internal::tflite::operand::Shape ofm_shape{1};
     ::internal::tflite::operand::Shape ifm_shape{1};
 
-    Padding padding;
-    Stride stride;
+    ::internal::Padding padding;
+    ::internal::Stride stride;
 
     FuseCode activation;
   };
@@ -174,9 +176,9 @@ Stage StageGenerator::generate(const ::internal::tflite::op::MaxPool2D::implicit
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(param.ifm_shape.asFeature(), param.ofm_shape.asFeature(),
-                                     param.stride, kw, kh)
-                      : valid_padding();
+                      ? ::internal::same_padding(param.ifm_shape.asFeature(),
+                                                 param.ofm_shape.asFeature(), param.stride, kw, kh)
+                      : ::internal::valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -251,8 +253,8 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit
     ::internal::tflite::operand::Shape ofm_shape{1};
     ::internal::tflite::operand::Shape ifm_shape{1};
 
-    Padding padding;
-    Stride stride;
+    ::internal::Padding padding;
+    ::internal::Stride stride;
 
     FuseCode activation;
   };
@@ -272,9 +274,9 @@ Stage StageGenerator::generate(const ::internal::tflite::op::AvgPool2D::implicit
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? same_padding(param.ifm_shape.asFeature(), param.ofm_shape.asFeature(),
-                                     param.stride, kw, kh)
-                      : valid_padding();
+                      ? ::internal::same_padding(param.ifm_shape.asFeature(),
+                                                 param.ofm_shape.asFeature(), param.stride, kw, kh)
+                      : ::internal::valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -527,5 +529,6 @@ Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::AclT
   throw std::runtime_error("Wrong Approach");
 }
 
-} // namespace stage
-} // namespace internal
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
index eafee8a..7b33a4a 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __INTERNAL_CPU_STAGE_GENERATOR_H__
-#define __INTERNAL_CPU_STAGE_GENERATOR_H__
+#ifndef __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
+#define __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
 
 #include "internal/IStageGenerator.h"
 
@@ -9,7 +9,9 @@
 
 #include "internal/common/TensorBuilder.h"
 
-namespace internal
+namespace neurun
+{
+namespace backend
 {
 namespace cpu
 {
@@ -18,10 +20,13 @@ class StageGenerator : public ::internal::IStageGenerator
 {
 public:
   StageGenerator(const ::internal::tflite::operand::Set &ctx,
-                 const std::shared_ptr<::internal::cpu::TensorBuilder> &tensor_builder,
+                 const std::shared_ptr<TensorBuilder> &tensor_builder,
                  const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder);
 
-  virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+  virtual std::shared_ptr<::internal::ITensorBuilder> tensor_builder() override
+  {
+    return _tensor_builder;
+  }
 
   virtual Stage generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
   virtual Stage generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
@@ -41,11 +46,12 @@ public:
 
 private:
   const ::internal::tflite::operand::Set &_ctx;
-  std::shared_ptr<::internal::cpu::TensorBuilder> _tensor_builder;
+  std::shared_ptr<TensorBuilder> _tensor_builder;
   std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
 };
 
 } // namespace cpu
-} // namespace internal
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_CPU_STAGE_GENERATOR_H__
+#endif // __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
index d13671f..7b31bec 100644 (file)
@@ -4,7 +4,9 @@
 
 #include "internal/arm_compute.h"
 
-namespace internal
+namespace neurun
+{
+namespace backend
 {
 namespace cpu
 {
@@ -51,4 +53,5 @@ TensorBuilder::at(const ::internal::tflite::operand::Index &ind)
 }
 
 } // namespace cpu
-} // namespace internal
+} // namespace backend
+} // namespace neurun
index d1b647b..5395161 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __INTERNAL_CPU_TENSOR_BUILDER_H__
-#define __INTERNAL_CPU_TENSOR_BUILDER_H__
+#ifndef __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
 
 #include <unordered_map>
 #include <unordered_set>
@@ -8,7 +8,9 @@
 #include "internal/cpu.h"
 #include "internal/arm_compute.h"
 
-namespace internal
+namespace neurun
+{
+namespace backend
 {
 namespace cpu
 {
@@ -33,6 +35,7 @@ private:
 };
 
 } // namespace cpu
-} // namespace internal
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_CPU_TENSOR_BUILDER_H__
+#endif // __NEURUN_BACKEND_CPU_TENSOR_BUILDER_H__
index 9e31ed4..e8fd286 100644 (file)
@@ -30,10 +30,11 @@ BackendManager::BackendManager(::internal::arm_compute::Plan &plan) : _plan(plan
 
   // Add CPU backend
   {
-    auto cpu_tensor_builder = std::make_shared<::internal::cpu::TensorBuilder>(_plan);
-    auto cpu_initializer_gen = std::make_shared<::internal::cpu::InitializerGenerator>(operands);
-    auto cpu_stage_gen = std::make_shared<::internal::cpu::StageGenerator>(
-        operands, cpu_tensor_builder, _common_tensor_builder);
+    using namespace ::neurun::backend::cpu;
+    auto cpu_tensor_builder = std::make_shared<TensorBuilder>(_plan);
+    auto cpu_initializer_gen = std::make_shared<InitializerGenerator>(operands);
+    auto cpu_stage_gen =
+        std::make_shared<StageGenerator>(operands, cpu_tensor_builder, _common_tensor_builder);
 
     // TODO Do not use magic string for backend id
     _gen_map["cpu"] = {cpu_initializer_gen, cpu_stage_gen};