[neurun] Remove backend/cpu dependency from PermutateSource/Sink (#3889)
authorДилшоджон Умронхонович Пошшоев/AI Tools Lab /SRR/Engineer/삼성전자 <d.poshshoev@samsung.com>
Thu, 6 Dec 2018 07:25:33 +0000 (10:25 +0300)
committer박세희/동작제어Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Thu, 6 Dec 2018 07:25:33 +0000 (16:25 +0900)
Related issue: #3888
Remove backend/cpu dependency from PermutateSource/Sink

Signed-off-by: Poshshoev Dilshodzhon <d.poshshoev@samsung.com>
runtimes/neurun/src/exec/Sink.h
runtimes/neurun/src/exec/Source.h
runtimes/neurun/src/frontend/execution.cc

index d39c37d..086010f 100644 (file)
@@ -20,8 +20,6 @@
 #include <cassert>
 
 #include "nnfw/std/memory.h"
-#include "kernel/cpu/PermuteLayer.h"
-#include "backend/cpu/operand/Tensor.h"
 #include "util/feature/nhwc/View.h"
 #include "util/feature/nchw/View.h"
 #include <util/feature/IndexIterator.h>
@@ -62,8 +60,8 @@ private:
 class PermutateSink final : public ISink
 {
 public:
-  PermutateSink(neurun::backend::cpu::operand::Tensor output, const graph::operand::Shape &shape)
-      : _output{output}, _shape{shape}
+  PermutateSink(void *output_buffer, const size_t &output_size, const graph::operand::Shape &shape)
+      : _output_buffer{(uint8_t *)output_buffer}, _output_size{output_size}, _shape{shape}
   {
   }
 
@@ -72,9 +70,6 @@ public:
   {
     // do NCHW_TO_NHWC permutation
     auto input_buffer = tensor.buffer();
-
-    auto output_buffer = _output.buffer();
-    auto output_size = _output.total_size();
     auto rank = _shape.rank();
 
     auto input_cl = dynamic_cast<::neurun::backend::acl_cl::operand::ICLTensor *>(&tensor);
@@ -83,7 +78,7 @@ public:
       case 0:
       case 1:
       {
-        memcpy(output_buffer, input_buffer, output_size);
+        memcpy(_output_buffer, input_buffer, _output_size);
         break;
       }
       case 2:
@@ -101,7 +96,8 @@ public:
         const auto &y = window[Window::DimY];
         for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
         {
-          memcpy(output_buffer + h * output_width, it.ptr(), output_width * sizeof(output_buffer));
+          memcpy(_output_buffer + h * output_width, it.ptr(),
+                 output_width * sizeof(_output_buffer));
         }
         break;
       }
@@ -124,8 +120,8 @@ public:
         {
           for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
           {
-            memcpy(output_buffer + c * height_width + h * width, it.ptr(),
-                   width * sizeof(output_buffer));
+            memcpy(_output_buffer + c * height_width + h * width, it.ptr(),
+                   width * sizeof(_output_buffer));
           }
         }
         break;
@@ -136,8 +132,8 @@ public:
 
         // TODO Fix this workaround (We may need codegen::operand::Object instead of ITensor)
         const util::feature::nchw::View<float> from{input_cl};
-        util::feature::nhwc::View<float> into{feature, reinterpret_cast<float *>(output_buffer),
-                                              output_size};
+        util::feature::nhwc::View<float> into{feature, reinterpret_cast<float *>(_output_buffer),
+                                              _output_size};
 
         ::nnfw::util::feature::iterate(feature)
             << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
@@ -153,7 +149,8 @@ public:
   }
 
 private:
-  const neurun::backend::cpu::operand::Tensor _output;
+  uint8_t *_output_buffer;
+  const size_t _output_size;
   const graph::operand::Shape _shape;
 };
 
index 7953d33..2e33bd2 100644 (file)
@@ -19,9 +19,7 @@
 
 #include <cassert>
 
-#include "kernel/cpu/PermuteLayer.h"
 #include "nnfw/std/memory.h"
-#include "backend/cpu/operand/Tensor.h"
 #include "util/feature/nchw/View.h"
 #include "util/feature/nhwc/Reader.h"
 #include <util/feature/IndexIterator.h>
@@ -62,8 +60,9 @@ private:
 class PermutateSource final : public ISource
 {
 public:
-  PermutateSource(neurun::backend::cpu::operand::Tensor input, const graph::operand::Shape &shape)
-      : _input{input}, _shape{shape}
+  PermutateSource(const void *input_buffer, const size_t &input_size,
+                  const graph::operand::Shape &shape)
+      : _input_buffer{(uint8_t *)input_buffer}, _input_size{input_size}, _shape{shape}
   {
   }
 
@@ -71,9 +70,6 @@ public:
   void push(neurun::backend::operand::ITensor &tensor) const override
   {
     // do NHWC_TO_NCHW permutation
-    auto input_buffer = _input.buffer();
-    auto input_size = _input.total_size();
-
     auto output_buffer = tensor.buffer();
     auto rank = _shape.rank();
 
@@ -83,7 +79,7 @@ public:
       case 0:
       case 1:
       {
-        memcpy(output_buffer, input_buffer, input_size);
+        memcpy(output_buffer, _input_buffer, _input_size);
         break;
       }
       case 2:
@@ -101,8 +97,8 @@ public:
         const auto &y = window[Window::DimY];
         for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
         {
-          memcpy(it.ptr(), input_buffer + h * matrix_shape.W,
-                 matrix_shape.W * sizeof(input_buffer));
+          memcpy(it.ptr(), _input_buffer + h * matrix_shape.W,
+                 matrix_shape.W * sizeof(_input_buffer));
         }
         break;
       }
@@ -125,8 +121,8 @@ public:
         {
           for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
           {
-            memcpy(it.ptr(), input_buffer + c * height_width + h * width,
-                   width * sizeof(input_buffer));
+            memcpy(it.ptr(), _input_buffer + c * height_width + h * width,
+                   width * sizeof(_input_buffer));
           }
         }
         break;
@@ -136,7 +132,7 @@ public:
         auto feature = _shape.asFeature();
 
         const util::feature::nhwc::Reader<float> from{
-            feature, reinterpret_cast<const float *>(input_buffer), input_size};
+            feature, reinterpret_cast<const float *>(_input_buffer), _input_size};
         util::feature::nchw::View<float> into{output_cl};
 
         // TODO Fix this workaround (We may need codegen::operand::Object instead of ITensor)
@@ -154,7 +150,8 @@ public:
   }
 
 private:
-  const neurun::backend::cpu::operand::Tensor _input;
+  const uint8_t *_input_buffer;
+  const size_t _input_size;
   const graph::operand::Shape _shape;
 };
 
index f35db59..14a8eaf 100644 (file)
@@ -24,8 +24,6 @@
 
 #include "graph/operand/DataType.h"
 #include "graph/operand/Index.h"
-#include "kernel/cpu/PermuteLayer.h"
-#include "backend/cpu/operand/Tensor.h"
 #include "graph/operand/Layout.h"
 #include "backend/BackendManager.h"
 #include "backend/interface/IConfig.h"
@@ -55,10 +53,9 @@ inline void source(ANeuralNetworksExecution *execution,
       output_layout == neurun::graph::operand::Layout::NCHW)
   {
     const auto tensor_info = neurun::compiler::TensorInfo(operand->shape(), operand->typeInfo());
-    auto tensor_from_interp = neurun::backend::cpu::operand::Tensor(tensor_info);
-    tensor_from_interp.setBuffer((uint8_t *)buffer);
 
-    execution->source<::neurun::exec::PermutateSource>(index, tensor_from_interp, operand->shape());
+    execution->source<::neurun::exec::PermutateSource>(index, buffer, tensor_info.total_size(),
+                                                       operand->shape());
     return;
   }
   using ::neurun::graph::operand::DataType;
@@ -110,10 +107,9 @@ inline void sink(ANeuralNetworksExecution *execution,
       output_layout == neurun::graph::operand::Layout::NHWC)
   {
     const auto tensor_info = neurun::compiler::TensorInfo(operand->shape(), operand->typeInfo());
-    auto tensor_from_interp = neurun::backend::cpu::operand::Tensor(tensor_info);
-    tensor_from_interp.setBuffer((uint8_t *)buffer);
 
-    execution->sink<::neurun::exec::PermutateSink>(index, tensor_from_interp, operand->shape());
+    execution->sink<::neurun::exec::PermutateSink>(index, buffer, tensor_info.total_size(),
+                                                   operand->shape());
     return;
   }
   using ::neurun::graph::operand::DataType;