[neurun] Remove usage of internal/nnapi/kernel (#3862)
author Hanjoung Lee / Motion Control Lab (SR) / Engineer / Samsung Electronics <hanjoung.lee@samsung.com>
Wed, 5 Dec 2018 04:25:53 +0000 (13:25 +0900)
committer Hyeongseok Oh / Motion Control Lab (SR) / Staff Engineer / Samsung Electronics <hseok82.oh@samsung.com>
Wed, 5 Dec 2018 04:25:53 +0000 (13:25 +0900)
Now that feature is 4D, this commit removes the kernel usage so that
kernel and feature are unified. It also removes the dynamic_casts to
backend implementations, which had to be done as part of this work (see
the sketch below).

The files under `internal/nnapi/kernel` themselves will be removed in a
separate commit.

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
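
For context, here is a minimal sketch of the unified 4D initialization
pattern that the diff below converges on. The helper name `initKernel4D`
is hypothetical; the types and calls (`ITensor`, `buffer()`, the
`feature` readers/views, and `iterate`) are taken from the diff, and
their exact signatures are assumptions. The point is that both layouts
now write through the generic `ITensor` interface, with no
backend-specific `dynamic_cast`:

    // Sketch only; signatures inferred from the diff, not a verbatim API.
    void initKernel4D(::neurun::backend::operand::ITensor &tensor,
                      const ::nnfw::util::feature::Shape &ker_shape,
                      const float *base, size_t size,
                      ::neurun::graph::operand::Layout layout)
    {
      // Constant data is laid out as NHWC regardless of the destination layout.
      const util::feature::nhwc::Reader<float> from{ker_shape, base, size};

      if (layout == ::neurun::graph::operand::Layout::NHWC)
      {
        // NHWC destination: view the tensor's raw buffer directly.
        auto into = util::feature::nhwc::View<float>{
            ker_shape, reinterpret_cast<float *>(tensor.buffer()), size};
        ::nnfw::util::feature::iterate(ker_shape)
            << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
                 into.at(nth, ch, row, col) = from.at(nth, ch, row, col);
               };
      }
      else // NCHW destination (e.g. acl_cl)
      {
        // The view wraps ITensor itself, so element placement is delegated
        // to the tensor rather than to a backend-specific cast.
        auto into = util::feature::nchw::View<float>{&tensor};
        ::nnfw::util::feature::iterate(ker_shape)
            << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
                 into.at(nth, ch, row, col) = from.at(nth, ch, row, col);
               };
      }
    }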
runtimes/neurun/src/compiler/ConstantInitializer.cc

index 1d40a15..a9f23be 100644
 #include "ConstantInitializer.h"
 
 #include "backend/interface/operand/IObject.h"
-#include "backend/acl_cl/operand/CLTensor.h"
 #include "backend/interface/IConfig.h"
-#include "backend/acl_cl/kernel/View.h"
 #include "backend/BackendManager.h"
-#include "internal/nnapi/kernel/Reader.h"
-#include "internal/nnapi/kernel/View.h"
-#include "util/kernel/IndexIterator.h"
 #include "graph/operation/FullyConnectedNode.h"
+#include "util/feature/nhwc/Reader.h"
+#include "util/feature/nhwc/View.h"
+#include "util/feature/nchw/View.h"
+#include "util/feature/IndexIterator.h"
 #include "logging.h"
 
 namespace neurun
@@ -58,7 +57,7 @@ void ConstantInitializer::operator()()
     auto layout =
         model_obj.lower_info()->def_backends().getOnlyElement()->config()->getOperandLayout();
     const auto shape = model_obj.shape();
-    auto base = model_obj.data().base();
+    auto base = reinterpret_cast<const float *>(model_obj.data().base());
     auto size = model_obj.data().size();
 
     obj.access([&](::neurun::backend::operand::ITensor &tensor) {
@@ -101,13 +100,13 @@ void ConstantInitializer::operator()()
           const auto ifm_shape = ifm.shape().asFeature();
           const auto num_output = shape.dim(0);
 
-          const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
-                                                      ifm_shape.W};
-          const ::internal::nnapi::kernel::Reader<float> from{ker_shape, base, size};
+          const ::nnfw::util::feature::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
+                                                       ifm_shape.W};
+          const util::feature::nhwc::Reader<float> from{ker_shape, base, size};
 
           if (layout == neurun::graph::operand::Layout::NHWC)
           {
-            ::nnfw::util::kernel::iterate(ker_shape)
+            ::nnfw::util::feature::iterate(ker_shape)
                 << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
                      const auto value = from.at(nth, ch, row, col);
 
@@ -128,7 +127,7 @@ void ConstantInitializer::operator()()
           {
             assert(layout == neurun::graph::operand::Layout::NCHW);
 
-            ::nnfw::util::kernel::iterate(ker_shape)
+            ::nnfw::util::feature::iterate(ker_shape)
                 << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
                      const auto value = from.at(nth, ch, row, col);
 
@@ -150,30 +149,27 @@ void ConstantInitializer::operator()()
         }
         case 4:
         {
-          auto ker_shape = shape.asKernel();
-          auto from = ::internal::nnapi::kernel::Reader<float>{ker_shape, base, size};
+          auto ker_shape = shape.asFeature();
+          auto from = util::feature::nhwc::Reader<float>{ker_shape, base, size};
 
           if (layout == neurun::graph::operand::Layout::NHWC)
           {
-            auto cpu_tensor = dynamic_cast<::neurun::backend::cpu::operand::Tensor *>(&tensor);
+            auto into = util::feature::nhwc::View<float>{
+                ker_shape, reinterpret_cast<float *>(tensor.buffer()), size};
 
-            auto into = ::internal::nnapi::kernel::View<float>{cpu_tensor};
-
-            ::nnfw::util::kernel::iterate(ker_shape)
+            ::nnfw::util::feature::iterate(ker_shape)
                 << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
                      const auto value = from.at(nth, ch, row, col);
-                     into.at(nth, row, col, ch) = value;
+                     into.at(nth, ch, row, col) = value;
                    };
           }
           else
           {
             assert(layout == neurun::graph::operand::Layout::NCHW);
 
-            auto acl_tensor = dynamic_cast<::neurun::backend::acl_cl::operand::CLTensor *>(&tensor);
-
-            auto into = ::internal::arm_compute::kernel::View<float>{acl_tensor};
+            auto into = util::feature::nchw::View<float>{&tensor};
 
-            ::nnfw::util::kernel::iterate(ker_shape)
+            ::nnfw::util::feature::iterate(ker_shape)
                 << [&](uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) {
                      const auto value = from.at(nth, ch, row, col);
                      into.at(nth, ch, row, col) = value;