[neurun] Move CPU Tensor implementation to backend (#2509)
author Hanjoung Lee/Motion Control Lab(SR)/Engineer/Samsung Electronics <hanjoung.lee@samsung.com>
Wed, 29 Aug 2018 01:44:28 +0000 (10:44 +0900)
committer Hyeongseok Oh/Motion Control Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Wed, 29 Aug 2018 01:44:28 +0000 (10:44 +0900)
Move `internal/cpu.{h|cc}` into the cpu backend directory: the CPU `Tensor` class now lives in `backend/cpu/operand/Tensor.{h|cc}`.

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
runtimes/neurun/src/backend/cpu/StageGenerator.h
runtimes/neurun/src/backend/cpu/TensorBuilder.cc
runtimes/neurun/src/backend/cpu/TensorBuilder.h
runtimes/neurun/src/backend/cpu/operand/Tensor.cc [new file with mode: 0644]
runtimes/neurun/src/backend/cpu/operand/Tensor.h [moved from runtimes/neurun/src/internal/cpu.h with 76% similarity]
runtimes/neurun/src/internal/Sink.h
runtimes/neurun/src/internal/Source.h
runtimes/neurun/src/internal/cpu.cc [deleted file]
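The move changes both the include path and the namespace of the CPU tensor class: what was `::internal::cpu::Tensor` in `internal/cpu.h` becomes `::neurun::backend::cpu::operand::Tensor` in `backend/cpu/operand/Tensor.h`. A minimal sketch of a call site under the new layout (the helper name is illustrative; construction from an `::arm_compute::TensorInfo` is the same one `TensorBuilder::prepare()` already uses below):

#include <memory>
#include <arm_compute/core/TensorInfo.h>
#include "backend/cpu/operand/Tensor.h"

// Hypothetical helper: the construction is unchanged by this commit,
// only the include path and namespace differ.
std::shared_ptr<::neurun::backend::cpu::operand::Tensor>
make_cpu_tensor(const ::arm_compute::TensorInfo &info)
{
  return std::make_shared<::neurun::backend::cpu::operand::Tensor>(info);
}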

index 06b6336..a39e7d0 100644 (file)
@@ -4,7 +4,7 @@
 #include "backend/IStageGenerator.h"
 
 #include "graph/operand/Set.h"
-#include "internal/cpu.h"
+#include "backend/cpu/operand/Tensor.h"
 #include "TensorBuilder.h"
 
 namespace neurun
index e8e9765..3b7f446 100644 (file)
@@ -30,7 +30,7 @@ void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tens
   for (auto ind_int : _inds)
   {
     ::neurun::graph::operand::Index ind{ind_int};
-    auto tensor = std::make_shared<::internal::cpu::Tensor>(tensor_info_ctx.at(ind.asInt()));
+    auto tensor = std::make_shared<operand::Tensor>(tensor_info_ctx.at(ind.asInt()));
     // TODO Fix allocation here. When Tensor object is created the memory for tensor is also
     //      allocated, and this must be fixed.
     _plan.operands().set(ind, std::make_shared<operand::Object>(tensor));
@@ -46,8 +46,7 @@ void TensorBuilder::allocate(void)
   //      See also: comment in `prepare()`
 }
 
-std::shared_ptr<::internal::cpu::Tensor>
-TensorBuilder::at(const ::neurun::graph::operand::Index &ind)
+std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::graph::operand::Index &ind)
 {
   return _tensors.at(ind.asInt());
 }
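`prepare()` creates one `operand::Tensor` per registered operand index and registers it with the plan, and `at()` hands the tensor back by index. A rough usage sketch; how the builder is constructed and how indices get registered is not shown in this hunk, so treat everything other than `prepare()`, `allocate()`, and `at()` as assumptions:

// `builder` is assumed to be a ready ::neurun::backend::cpu::TensorBuilder and
// `tensor_info_ctx` a std::map<int, ::arm_compute::TensorInfo>.
builder.prepare(tensor_info_ctx); // creates the tensors; memory is allocated here (see TODO above)
builder.allocate();               // currently does nothing, allocation already happened in prepare()
std::shared_ptr<::neurun::backend::cpu::operand::Tensor> tensor =
    builder.at(::neurun::graph::operand::Index{0});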
index 0f2bf42..484a3e5 100644 (file)
@@ -5,7 +5,7 @@
 #include <unordered_set>
 
 #include "backend/ITensorBuilder.h"
-#include "internal/cpu.h"
+#include "backend/cpu/operand/Tensor.h"
 #include "codegen/Plan.h"
 
 namespace neurun
@@ -26,12 +26,12 @@ public:
   virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
   virtual void allocate(void) override;
 
-  std::shared_ptr<::internal::cpu::Tensor> at(const ::neurun::graph::operand::Index &ind);
+  std::shared_ptr<operand::Tensor> at(const ::neurun::graph::operand::Index &ind);
 
 private:
   codegen::Plan &_plan;
   std::unordered_set<int> _inds;
-  std::unordered_map<int, std::shared_ptr<::internal::cpu::Tensor>> _tensors;
+  std::unordered_map<int, std::shared_ptr<operand::Tensor>> _tensors;
 };
 
 } // namespace cpu
diff --git a/runtimes/neurun/src/backend/cpu/operand/Tensor.cc b/runtimes/neurun/src/backend/cpu/operand/Tensor.cc
new file mode 100644 (file)
index 0000000..bc6b226
--- /dev/null
@@ -0,0 +1,17 @@
+#include "Tensor.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+namespace operand
+{
+
+// NO IMPLEMENTATION YET
+
+} // namespace operand
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
similarity index 76%
rename from runtimes/neurun/src/internal/cpu.h
rename to runtimes/neurun/src/backend/cpu/operand/Tensor.h
index c3f03ef..b08e3c9 100644 (file)
@@ -1,22 +1,17 @@
-#ifndef __INTERNAL_CPU_H__
-#define __INTERNAL_CPU_H__
+#ifndef __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
+#define __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
 
 #include <arm_compute/core/ITensor.h>
 #include <arm_compute/core/TensorInfo.h>
-#include <arm_compute/core/CL/OpenCL.h>
 
-#include <unistd.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <iostream>
-
-namespace internal
+namespace neurun
+{
+namespace backend
 {
 namespace cpu
 {
+namespace operand
+{
 
 class Tensor : public ::arm_compute::ITensor
 {
@@ -53,7 +48,9 @@ private:
   uint8_t *_buffer = nullptr;
 };
 
-} // cpu
-} // internal
+} // namespace operand
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
 
-#endif // __INTERNAL_CPU_H__
+#endif // __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
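Only the header guard and namespaces change above; the class body moves over largely verbatim (76% similarity). For orientation, a minimal sketch of what a CPU tensor of this shape has to provide: the base class and the `_buffer` member are taken from the diff, while the constructor and accessors are assumptions filled in from the pure-virtual interface of `::arm_compute::ITensor`:

#include <cstdint>
#include <arm_compute/core/ITensor.h>
#include <arm_compute/core/TensorInfo.h>

namespace sketch // hypothetical namespace, not the one used by neurun
{

class Tensor : public ::arm_compute::ITensor
{
public:
  Tensor(const ::arm_compute::TensorInfo &info) : _info(info)
  {
    // Buffer is allocated at construction time, matching the TODO noted in
    // TensorBuilder::prepare(). Ownership/cleanup is not shown in the diff.
    _buffer = new uint8_t[_info.total_size()];
  }

  // Pure-virtual members of ::arm_compute::ITensor:
  ::arm_compute::ITensorInfo *info() const override
  {
    return const_cast<::arm_compute::TensorInfo *>(&_info);
  }
  ::arm_compute::ITensorInfo *info() override { return &_info; }
  uint8_t *buffer() const override { return _buffer; }

private:
  ::arm_compute::TensorInfo _info;
  uint8_t *_buffer = nullptr; // member visible in the hunk above
};

} // namespace sketch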
index 7c29aaf..e300d03 100644 (file)
@@ -8,7 +8,7 @@
 #include <util/feature/Shape.h>
 #include <util/feature/IndexIterator.h>
 
-#include "internal/cpu.h"
+#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
 #include "internal/nnapi/feature/View.h"
 #include "internal/nnapi/feature/Reader.h"
 
@@ -65,7 +65,7 @@ public:
   void pull(::arm_compute::ITensor &tensor) const override
   {
     // TODO: This is just a workaround; it needs to be refactored.
-    if (typeid(tensor) == typeid(::internal::cpu::Tensor))
+    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
     {
       const ::internal::nnapi::feature::Reader<float> from{_shape, tensor.buffer(), _size};
       ::internal::nnapi::feature::View<float> into{_shape, _base, _size};
index 00bc28e..7d626fd 100644 (file)
@@ -8,7 +8,7 @@
 #include <util/feature/Shape.h>
 #include <util/feature/IndexIterator.h>
 
-#include "internal/cpu.h"
+#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
 #include "internal/nnapi/feature/Reader.h"
 #include "internal/nnapi/feature/View.h"
 
@@ -68,7 +68,7 @@ public:
   void push(::arm_compute::ITensor &tensor) const override
   {
     // TODO: This is just a workaround; it needs to be refactored.
-    if (typeid(tensor) == typeid(::internal::cpu::Tensor))
+    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
     {
       const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
       ::internal::nnapi::feature::View<float> into{_shape, tensor.buffer(), _size};
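Both `Sink::pull` and `Source::push` branch on the tensor's dynamic type; the TODO marks this as a workaround. `typeid` applied to a reference of polymorphic type inspects the concrete type and compares it exactly, so only a real `backend::cpu::operand::Tensor` takes the CPU path. The check in isolation, as a self-contained sketch (the helper name is illustrative):

#include <typeinfo>
#include <arm_compute/core/ITensor.h>
#include "backend/cpu/operand/Tensor.h"

// Hypothetical helper: true only when `tensor` is exactly the CPU backend
// tensor type, i.e. the same test the workaround above performs.
bool is_cpu_tensor(const ::arm_compute::ITensor &tensor)
{
  // An exact-type comparison: a class derived from the CPU Tensor would
  // not match, unlike a dynamic_cast-based check.
  return typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor);
}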
diff --git a/runtimes/neurun/src/internal/cpu.cc b/runtimes/neurun/src/internal/cpu.cc
deleted file mode 100644 (file)
index d8473d8..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "internal/cpu.h"