Move Padding module from `internal` to `util`. (#3882)
author Hanjoung Lee / Motion Control Lab (SR) / Engineer / Samsung Electronics <hanjoung.lee@samsung.com>
Fri, 7 Dec 2018 07:48:57 +0000 (16:48 +0900)
committer Hyeongseok Oh / Motion Control Lab (SR) / Staff Engineer / Samsung Electronics <hseok82.oh@samsung.com>
Fri, 7 Dec 2018 07:48:57 +0000 (16:48 +0900)
Move the Padding module from `internal` to `util`, placing its code in the
`neurun::util` namespace and updating all call sites accordingly.

Part of #3764

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
runtimes/neurun/src/backend/cpu/StageGenerator.cc
runtimes/neurun/src/util/Padding.cc [moved from runtimes/neurun/src/internal/Padding.cc with 95% similarity]
runtimes/neurun/src/util/Padding.h [moved from runtimes/neurun/src/internal/Padding.h with 85% similarity]
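
Taken together, the rename and the hunks below imply the following post-move
shape for util/Padding.h. This is a sketch, not the verbatim file: the guards,
namespaces, and function declarations are visible in the header hunk at the
bottom, but the member lists of Padding and Stride are assumptions, inferred
from call sites (stride.vertical / stride.horizontal) and from how
asPadStringInfo consumes the padding.

  #ifndef __NEURUN_UTIL_PADDING_H__
  #define __NEURUN_UTIL_PADDING_H__

  #include <stdint.h>

  #include <util/feature/Shape.h>

  namespace neurun
  {
  namespace util
  {

  struct Padding
  {
    uint32_t left; // assumed members; the struct body is not shown in the hunks below
    uint32_t right;
    uint32_t top;
    uint32_t bottom;
  };

  struct Stride
  {
    uint32_t vertical;   // matches stride.vertical at the call sites
    uint32_t horizontal; // matches stride.horizontal at the call sites
  };

  Padding valid_padding(void);
  Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
                       const nnfw::util::feature::Shape &ofm_shape, const Stride &stride,
                       uint32_t kw, uint32_t kh);

  } // namespace util
  } // namespace neurun

  #endif // __NEURUN_UTIL_PADDING_H__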

index 2583b0a..28a0d8d 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
@@ -27,7 +27,7 @@
 
 #include "kernel/acl_cl/ConcatLayer.h"
 
-#include "internal/Padding.h"
+#include "util/Padding.h"
 
 #include "graph/operand/Index.h"
 
@@ -46,8 +46,8 @@ make_cl_function(std::unique_ptr<::arm_compute::IFunction> &&layer)
       new ::neurun::kernel::acl_cl::CLFunction(std::move(layer)));
 }
 
-::arm_compute::PadStrideInfo asPadStringInfo(const ::internal::Padding &padding,
-                                             const ::internal::Stride &stride)
+::arm_compute::PadStrideInfo asPadStringInfo(const neurun::util::Padding &padding,
+                                             const neurun::util::Stride &stride)
 {
   return ::arm_compute::PadStrideInfo{stride.horizontal,
                                       stride.vertical,
@@ -156,7 +156,7 @@ void StageGenerator::visit(const graph::operation::Conv2DNode &node)
   assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
          (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
-  ::internal::Stride stride;
+  neurun::util::Stride stride;
 
   stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
   stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
@@ -169,8 +169,8 @@ void StageGenerator::visit(const graph::operation::Conv2DNode &node)
     graph::operand::Index ker_index;
     graph::operand::Index bias_index;
 
-    ::internal::Padding padding;
-    ::internal::Stride stride;
+    neurun::util::Padding padding;
+    neurun::util::Stride stride;
 
     FuseCode activation;
   };
@@ -185,8 +185,8 @@ void StageGenerator::visit(const graph::operation::Conv2DNode &node)
   param.stride = stride;
   param.padding =
       (padding_type == ANEURALNETWORKS_PADDING_SAME)
-          ? ::internal::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
-          : ::internal::valid_padding();
+          ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
+          : neurun::util::valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -247,8 +247,8 @@ void StageGenerator::visit(const graph::operation::MaxPool2DNode &node)
     uint32_t kw;
     uint32_t kh;
 
-    ::internal::Padding padding;
-    ::internal::Stride stride;
+    neurun::util::Padding padding;
+    neurun::util::Stride stride;
 
     // TODO Add 'activation' field
   };
@@ -265,8 +265,8 @@ void StageGenerator::visit(const graph::operation::MaxPool2DNode &node)
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
-                      : ::internal::valid_padding();
+                      ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      : neurun::util::valid_padding();
 
   VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
   VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
@@ -338,8 +338,8 @@ void StageGenerator::visit(const graph::operation::AvgPool2DNode &node)
     uint32_t kw;
     uint32_t kh;
 
-    ::internal::Padding padding;
-    ::internal::Stride stride;
+    neurun::util::Padding padding;
+    neurun::util::Stride stride;
 
     // TODO Add 'activation' field
   };
@@ -356,8 +356,8 @@ void StageGenerator::visit(const graph::operation::AvgPool2DNode &node)
   param.stride.horizontal = hstride;
 
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
-                      : ::internal::valid_padding();
+                      ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      : neurun::util::valid_padding();
 
   VERBOSE(AvgPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
   VERBOSE(AvgPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
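
Note the qualification difference between the two generators: the acl_cl hunks
above write neurun::util::Padding while the cpu hunks below write the shorter
util::Padding. Both resolve to the same type; a self-contained sketch of why
(the enclosing namespace blocks are implied by the files, not shown in the
hunks):

  #include <cstdint>

  namespace neurun
  {
  namespace util
  {
  struct Stride
  {
    uint32_t vertical;
    uint32_t horizontal;
  };
  } // namespace util

  namespace backend
  {
  namespace cpu
  {
  // Inside namespace neurun, unqualified lookup walks outward and finds
  // neurun::util, so the shorter spelling works:
  util::Stride cpu_stride;
  } // namespace cpu
  } // namespace backend
  } // namespace neurun

  // The fully qualified name works from any scope, which is what the acl_cl
  // generator uses:
  neurun::util::Stride acl_stride;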
index 75ebf9a..b2278d6 100644
--- a/runtimes/neurun/src/backend/cpu/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/cpu/StageGenerator.cc
@@ -19,7 +19,7 @@
 #include <stdexcept>
 
 #include "cpp14/memory.h"
-#include "internal/Padding.h"
+#include "util/Padding.h"
 #include "kernel/cpu/OperationUtils.h"
 #include "kernel/cpu/ConvolutionLayer.h"
 #include "kernel/cpu/AvgPoolLayer.h"
@@ -71,7 +71,7 @@ void StageGenerator::visit(const graph::operation::Conv2DNode &node)
   assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
          (ANEURALNETWORKS_PADDING_VALID == padding_type));
 
-  ::internal::Stride stride;
+  util::Stride stride;
 
   stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
   stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
@@ -89,8 +89,8 @@ void StageGenerator::visit(const graph::operation::Conv2DNode &node)
     ::neurun::kernel::cpu::Shape ker_shape;
     ::neurun::kernel::cpu::Shape bias_shape;
 
-    ::internal::Padding padding;
-    ::internal::Stride stride;
+    util::Padding padding;
+    util::Stride stride;
 
     FuseCode activation;
   };
@@ -109,11 +109,11 @@ void StageGenerator::visit(const graph::operation::Conv2DNode &node)
 
   param.stride = stride;
   param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
-                      ? ::internal::same_padding(_ctx.at(ifm_index).shape().asFeature(),
-                                                 _ctx.at(ofm_index).shape().asFeature(), stride,
-                                                 _ctx.at(ker_index).shape().asKernel().W,
-                                                 _ctx.at(ker_index).shape().asKernel().H)
-                      : ::internal::valid_padding();
+                      ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                                           _ctx.at(ofm_index).shape().asFeature(), stride,
+                                           _ctx.at(ker_index).shape().asKernel().W,
+                                           _ctx.at(ker_index).shape().asKernel().H)
+                      : util::valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -174,8 +174,8 @@ void StageGenerator::visit(const graph::operation::MaxPool2DNode &node)
     ::neurun::kernel::cpu::Shape ofm_shape;
     ::neurun::kernel::cpu::Shape ifm_shape;
 
-    ::internal::Padding padding;
-    ::internal::Stride stride;
+    util::Padding padding;
+    util::Stride stride;
 
     FuseCode activation;
   };
@@ -196,9 +196,9 @@ void StageGenerator::visit(const graph::operation::MaxPool2DNode &node)
 
   param.padding =
       (padding_type == ANEURALNETWORKS_PADDING_SAME)
-          ? ::internal::same_padding(_ctx.at(ifm_index).shape().asFeature(),
-                                     _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
-          : ::internal::valid_padding();
+          ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                               _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
+          : util::valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
@@ -273,8 +273,8 @@ void StageGenerator::visit(const graph::operation::AvgPool2DNode &node)
     ::neurun::kernel::cpu::Shape ofm_shape;
     ::neurun::kernel::cpu::Shape ifm_shape;
 
-    ::internal::Padding padding;
-    ::internal::Stride stride;
+    util::Padding padding;
+    util::Stride stride;
 
     FuseCode activation;
   };
@@ -295,9 +295,9 @@ void StageGenerator::visit(const graph::operation::AvgPool2DNode &node)
 
   param.padding =
       (padding_type == ANEURALNETWORKS_PADDING_SAME)
-          ? ::internal::same_padding(_ctx.at(ifm_index).shape().asFeature(),
-                                     _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
-          : ::internal::valid_padding();
+          ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+                               _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
+          : util::valid_padding();
 
   param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
 
similarity index 95%
rename from runtimes/neurun/src/internal/Padding.cc
rename to runtimes/neurun/src/util/Padding.cc
index 200fa1a..9b5e654 100644
--- a/runtimes/neurun/src/internal/Padding.cc
+++ b/runtimes/neurun/src/util/Padding.cc
  * limitations under the License.
  */
 
-#include "internal/Padding.h"
+#include "util/Padding.h"
 
 #include <algorithm>
 
-namespace internal
+namespace neurun
+{
+namespace util
 {
 
 Padding valid_padding(void)
@@ -69,4 +71,5 @@ Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
   return padding;
 }
 
-} // namespace internal
+} // namespace util
+} // namespace neurun
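
The moved implementation is 95% similar to the old one, so the hunks above
only touch the include and the namespace. For reference, the SAME-padding rule
that same_padding() is expected to apply, as a standalone sketch: each axis is
padded just enough that the output grid, stepped at the given stride, covers
the input, with any odd pixel conventionally going to the bottom/right. This
assumes the usual NN API convention and the assumed left/right/top/bottom
members; the function body itself is not part of this diff.

  #include <algorithm>
  #include <cstdint>

  struct Stride { uint32_t vertical, horizontal; };
  struct Padding { uint32_t left, right, top, bottom; };

  Padding same_padding_sketch(uint32_t ifm_h, uint32_t ifm_w, uint32_t ofm_h, uint32_t ofm_w,
                              const Stride &stride, uint32_t kw, uint32_t kh)
  {
    // Total padding needed per axis: max(0, (out - 1) * stride + kernel - in)
    const int32_t need_v = (static_cast<int32_t>(ofm_h) - 1) * static_cast<int32_t>(stride.vertical) +
                           static_cast<int32_t>(kh) - static_cast<int32_t>(ifm_h);
    const int32_t need_h = (static_cast<int32_t>(ofm_w) - 1) * static_cast<int32_t>(stride.horizontal) +
                           static_cast<int32_t>(kw) - static_cast<int32_t>(ifm_w);
    const uint32_t total_v = static_cast<uint32_t>(std::max(need_v, 0));
    const uint32_t total_h = static_cast<uint32_t>(std::max(need_h, 0));

    Padding padding;
    padding.top = total_v / 2; // odd pixel goes to the bottom
    padding.bottom = total_v - padding.top;
    padding.left = total_h / 2; // odd pixel goes to the right
    padding.right = total_h - padding.left;
    return padding;
  }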
similarity index 85%
rename from runtimes/neurun/src/internal/Padding.h
rename to runtimes/neurun/src/util/Padding.h
index 84e081a..71ad800 100644
--- a/runtimes/neurun/src/internal/Padding.h
+++ b/runtimes/neurun/src/util/Padding.h
  * limitations under the License.
  */
 
-#ifndef __INTERNAL_PADDING_H__
-#define __INTERNAL_PADDING_H__
+#ifndef __NEURUN_UTIL_PADDING_H__
+#define __NEURUN_UTIL_PADDING_H__
 
 #include <stdint.h>
 
 #include <util/feature/Shape.h>
 
-namespace internal
+namespace neurun
+{
+namespace util
 {
 
 struct Padding
@@ -43,6 +45,7 @@ Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
                      const nnfw::util::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
                      uint32_t kh);
 
-} // namespace internal
+} // namespace util
+} // namespace neurun
 
-#endif // __INTERNAL_PADDING_H__
+#endif // __NEURUN_UTIL_PADDING_H__
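
A hypothetical call site after this commit, factoring out the ternary that the
StageGenerators repeat inline at each visit() above. select_padding is an
illustrative helper, not part of the commit; this assumes the neurun source
tree on the include path, with NeuralNetworks.h supplying the
ANEURALNETWORKS_PADDING_* constants.

  #include <NeuralNetworks.h>

  #include "util/Padding.h"

  neurun::util::Padding select_padding(int32_t padding_type,
                                       const nnfw::util::feature::Shape &ifm_shape,
                                       const nnfw::util::feature::Shape &ofm_shape,
                                       const neurun::util::Stride &stride, uint32_t kw, uint32_t kh)
  {
    // Same selection the generators perform for Conv2D, MaxPool2D, and AvgPool2D.
    return (padding_type == ANEURALNETWORKS_PADDING_SAME)
               ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, kw, kh)
               : neurun::util::valid_padding();
  }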