[neurun] Extract asXX helpers into separate file (#2278)
author Sujin Kim/동작제어Lab(SR)/Engineer/Samsung Electronics <sjsujin.kim@samsung.com>
Tue, 14 Aug 2018 02:08:25 +0000 (11:08 +0900)
committer Saehie Park/동작제어Lab(SR)/Principal Engineer/Samsung Electronics <saehie.park@samsung.com>
Tue, 14 Aug 2018 02:08:25 +0000 (11:08 +0900)
* [neurun] Extract asXX casting helpers into Cast.h

This commit extracts the asXX casting helpers into Cast.h, like PACL's Cast.h (https://github.sec.samsung.net/STAR/nnfw/blob/master/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.h).

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
* Change file name / add namespace / separate source file

* Add empty line
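
The extraction places the helpers in the internal namespace, so call sites (see the compilation.cc hunks below) now qualify them as ::internal::asTensorInfo / ::internal::asTensorShape. The following is a minimal usage sketch, not part of this commit; the include path for nnfw::util::feature::Shape and the example shape values are assumptions.

    // Usage sketch only -- not part of this commit. Assumes the ARM Compute
    // Library and nnfw util headers are on the include path; the include for
    // nnfw::util::feature::Shape below is a guess at its location.
    #include "internal/Convert.h"
    #include "util/feature/Shape.h"

    void example()
    {
      // Illustrative 224x224x3 feature map (the C, H, W values are made up).
      const nnfw::util::feature::Shape ifm_shape{3, 224, 224};

      // Callers now reach the extracted helpers through the internal namespace.
      const ::arm_compute::TensorShape shape = ::internal::asTensorShape(ifm_shape);
      const ::arm_compute::TensorInfo info = ::internal::asTensorInfo(ifm_shape);

      (void)shape;
      (void)info;
    }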

runtimes/neurun/src/compilation.cc
runtimes/neurun/src/internal/Convert.cc [new file with mode: 0644]
runtimes/neurun/src/internal/Convert.h [new file with mode: 0644]

diff --git a/runtimes/neurun/src/compilation.cc b/runtimes/neurun/src/compilation.cc
index b0720c8..c7a63a1 100644
--- a/runtimes/neurun/src/compilation.cc
+++ b/runtimes/neurun/src/compilation.cc
@@ -7,6 +7,7 @@
 #include <arm_compute/runtime/IFunction.h>
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
+#include "internal/Convert.h"
 #include "backend/acl_cl/kernel/View.h"
 #include "backend/acl_cl/TensorBuilder.h"
 #include "internal/nnapi/kernel/Reader.h"
 #include "model.h"
 #include "logging.h"
 
-::arm_compute::TensorShape asTensorShape(int32_t h, int32_t w)
-{
-  return ::arm_compute::TensorShape(w, h);
-}
-
-::arm_compute::TensorShape asTensorShape(const nnfw::util::feature::Shape &shape)
-{
-  return ::arm_compute::TensorShape(shape.W, shape.H, shape.C, 1);
-}
-
-::arm_compute::TensorShape asTensorShape(const nnfw::util::kernel::Shape &shape)
-{
-  return ::arm_compute::TensorShape(shape.W, shape.H, shape.C, shape.N);
-}
-
-::arm_compute::TensorInfo asTensorInfo(const nnfw::util::feature::Shape &shape)
-{
-  return ::arm_compute::TensorInfo(asTensorShape(shape), 1, ::arm_compute::DataType::F32);
-}
-
-::arm_compute::TensorInfo asTensorInfo(const nnfw::util::kernel::Shape &shape)
-{
-  return ::arm_compute::TensorInfo(asTensorShape(shape), 1, ::arm_compute::DataType::F32);
-}
-
-::arm_compute::TensorInfo asTensorInfo(int32_t size)
-{
-  return ::arm_compute::TensorInfo(::arm_compute::TensorShape(size), 1,
-                                   ::arm_compute::DataType::F32);
-}
-
-::arm_compute::TensorInfo asTensorInfo(int32_t h, int32_t w)
-{
-  return ::arm_compute::TensorInfo(::arm_compute::TensorShape(w, h), 1,
-                                   ::arm_compute::DataType::F32);
-}
-
 #include "codegen/IPlanBuilder.h"
 #include "codegen/BackendResolver.h"
 
@@ -103,10 +67,10 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::implicit::Node &node)
   const auto bias_size = _ctx.at(bias_index).shape().asVector();
 
   // Set Shape Constraints
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape));
-  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape));
-  _builder.addShapeConstr(ker_index, asTensorInfo(ker_shape));
-  _builder.addShapeConstr(bias_index, asTensorInfo(bias_size));
+  _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+  _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
+  _builder.addShapeConstr(ker_index, ::internal::asTensorInfo(ker_shape));
+  _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
 
   // Generate Initializers
   auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
@@ -127,8 +91,8 @@ void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &nod
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
 
   // Set Shape Constraints
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape));
-  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape));
+  _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+  _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
 
   // Generate Stage
   auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
@@ -144,8 +108,8 @@ void Planner::visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &nod
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
 
   // Set Shape Constraints
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape));
-  _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape));
+  _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
+  _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
 
   // Generate Stage
   auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
@@ -166,7 +130,7 @@ void Planner::visit(const ::internal::tflite::op::Concat::Node &node)
          3);
 
   // Set Shape Constraints (for output)
-  _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape));
+  _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
 
   // Set Shape Constraints (for input)
   uint32_t depth = 0;
@@ -175,7 +139,7 @@ void Planner::visit(const ::internal::tflite::op::Concat::Node &node)
   {
     const ::internal::tflite::operand::Index ifm_index{index};
     const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
-    _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape));
+    _builder.addShapeConstr(ifm_index, ::internal::asTensorInfo(ifm_shape));
   }
 
   // Generate Stage
@@ -210,10 +174,11 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
   const auto bias_size = _ctx.at(bias_index).shape().asVector();
 
   // Set Shape Constraints
-  _builder.addShapeConstr(output_index, asTensorInfo(output_size));
-  _builder.addShapeConstr(input_index, asTensorInfo(ifm_shape));
-  _builder.addShapeConstr(weight_index, asTensorInfo(num_output /*H*/, input_size /*W*/));
-  _builder.addShapeConstr(bias_index, asTensorInfo(bias_size));
+  _builder.addShapeConstr(output_index, ::internal::asTensorInfo(output_size));
+  _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
+  _builder.addShapeConstr(weight_index,
+                          ::internal::asTensorInfo(num_output /*H*/, input_size /*W*/));
+  _builder.addShapeConstr(bias_index, ::internal::asTensorInfo(bias_size));
 
   // Generate Initializers
   auto init_gen = _backend_resolver.getInitializerGenerator(typeid(node));
@@ -247,8 +212,8 @@ void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
   assert(ifm_shape.W == 1);
   assert((ifm_shape.C * ifm_shape.H * ifm_shape.W) == out_size);
 
-  _builder.addShapeConstr(output_index, asTensorInfo(out_size));
-  _builder.addShapeConstr(input_index, asTensorInfo(ifm_shape));
+  _builder.addShapeConstr(output_index, ::internal::asTensorInfo(out_size));
+  _builder.addShapeConstr(input_index, ::internal::asTensorInfo(ifm_shape));
 
   // Generate Stage
   auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
@@ -272,8 +237,8 @@ void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
 
   const uint32_t len = _ctx.at(output_index).shape().dim(1);
 
-  _builder.addShapeConstr(output_index, asTensorInfo(len));
-  _builder.addShapeConstr(input_index, asTensorInfo(len));
+  _builder.addShapeConstr(output_index, ::internal::asTensorInfo(len));
+  _builder.addShapeConstr(input_index, ::internal::asTensorInfo(len));
 
   // Generate Stage
   auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
diff --git a/runtimes/neurun/src/internal/Convert.cc b/runtimes/neurun/src/internal/Convert.cc
new file mode 100644
index 0000000..fa577f9
--- /dev/null
+++ b/runtimes/neurun/src/internal/Convert.cc
@@ -0,0 +1,43 @@
+#include "Convert.h"
+
+namespace internal
+{
+
+::arm_compute::TensorShape asTensorShape(int32_t h, int32_t w)
+{
+  return ::arm_compute::TensorShape(w, h);
+}
+
+::arm_compute::TensorShape asTensorShape(const nnfw::util::feature::Shape &shape)
+{
+  return ::arm_compute::TensorShape(shape.W, shape.H, shape.C, 1);
+}
+
+::arm_compute::TensorShape asTensorShape(const nnfw::util::kernel::Shape &shape)
+{
+  return ::arm_compute::TensorShape(shape.W, shape.H, shape.C, shape.N);
+}
+
+::arm_compute::TensorInfo asTensorInfo(const nnfw::util::feature::Shape &shape)
+{
+  return ::arm_compute::TensorInfo(asTensorShape(shape), 1, ::arm_compute::DataType::F32);
+}
+
+::arm_compute::TensorInfo asTensorInfo(const nnfw::util::kernel::Shape &shape)
+{
+  return ::arm_compute::TensorInfo(asTensorShape(shape), 1, ::arm_compute::DataType::F32);
+}
+
+::arm_compute::TensorInfo asTensorInfo(int32_t size)
+{
+  return ::arm_compute::TensorInfo(::arm_compute::TensorShape(size), 1,
+                                   ::arm_compute::DataType::F32);
+}
+
+::arm_compute::TensorInfo asTensorInfo(int32_t h, int32_t w)
+{
+  return ::arm_compute::TensorInfo(::arm_compute::TensorShape(w, h), 1,
+                                   ::arm_compute::DataType::F32);
+}
+
+} // namespace internal
diff --git a/runtimes/neurun/src/internal/Convert.h b/runtimes/neurun/src/internal/Convert.h
new file mode 100644
index 0000000..55a9cb7
--- /dev/null
+++ b/runtimes/neurun/src/internal/Convert.h
@@ -0,0 +1,23 @@
+#ifndef __INTERNAL_CONVERT_H__
+#define __INTERNAL_CONVERT_H__
+
+#include <arm_compute/core/TensorInfo.h>
+#include <arm_compute/core/TensorShape.h>
+
+#include "internal/Model.h"
+
+namespace internal
+{
+
+::arm_compute::TensorShape asTensorShape(int32_t h, int32_t w);
+::arm_compute::TensorShape asTensorShape(const nnfw::util::feature::Shape &shape);
+::arm_compute::TensorShape asTensorShape(const nnfw::util::kernel::Shape &shape);
+
+::arm_compute::TensorInfo asTensorInfo(const nnfw::util::feature::Shape &shape);
+::arm_compute::TensorInfo asTensorInfo(const nnfw::util::kernel::Shape &shape);
+::arm_compute::TensorInfo asTensorInfo(int32_t size);
+::arm_compute::TensorInfo asTensorInfo(int32_t h, int32_t w);
+
+} // namespace internal
+
+#endif // __INTERNAL_CONVERT_H__