Change dimension of ACLTensor to start from higher dimension (#6765)
author장지섭/On-Device Lab(SR)/Engineer/삼성전자 <jiseob.jang@samsung.com>
Wed, 21 Aug 2019 10:12:27 +0000 (19:12 +0900)
committer오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Wed, 21 Aug 2019 10:12:27 +0000 (19:12 +0900)
This commit changes the dimension() function of ACLTensor so that indexing starts from the higher dimension.

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
15 files changed:
runtimes/neurun/backend/acl_cl/kernel/ConcatLayer.cc
runtimes/neurun/backend/acl_cl/operand/CLSubTensor.cc
runtimes/neurun/backend/acl_cl/operand/CLSubTensor.h
runtimes/neurun/backend/acl_cl/operand/CLTensor.cc
runtimes/neurun/backend/acl_cl/operand/CLTensor.h
runtimes/neurun/backend/acl_common/AclMemoryManager.h
runtimes/neurun/backend/acl_common/IACLTensor.cc
runtimes/neurun/backend/acl_common/IACLTensor.h
runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
runtimes/neurun/backend/acl_neon/kernel/ConcatLayer.cc
runtimes/neurun/backend/acl_neon/operand/NESubTensor.cc
runtimes/neurun/backend/acl_neon/operand/NESubTensor.h
runtimes/neurun/backend/acl_neon/operand/NETensor.cc
runtimes/neurun/backend/acl_neon/operand/NETensor.h
runtimes/neurun/core/include/util/feature/nchw/View.h

index de6b3c0..db4e04e 100644 (file)
@@ -37,7 +37,7 @@ inline bool matchSizeExceptAxis(const ::neurun::backend::acl_cl::operand::ICLTen
   {
     if (axis == i)
       continue;
-    if (t1->dimension(i) != t2->dimension(i))
+    if (t1->info()->dimension(i) != t2->info()->dimension(i))
       return false;
   }
   return true;
@@ -72,10 +72,10 @@ template <typename T> bool ConcatLayer::concatenate()
     {
       assert(_output_alloc->ptr()->layout() == input->ptr()->layout());
       assert(matchSizeExceptAxis(_output_alloc->ptr(), input->ptr(), _axis));
-      axis_sum += input->ptr()->dimension(_axis);
+      axis_sum += input->ptr()->info()->dimension(_axis);
     }
 
-    assert(_output_alloc->ptr()->dimension(_axis) == axis_sum);
+    assert(_output_alloc->ptr()->info()->dimension(_axis) == axis_sum);
   }
 
   VERBOSE(Concat_RUN) << "START Concat" << std::endl;
index 9b3fd3c..70c8829 100644 (file)
@@ -26,9 +26,10 @@ namespace operand
 {
 
 CLSubTensor::CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
-                         const arm_compute::Coordinates &coords, bool extend_parent)
+                         const arm_compute::Coordinates &coords, size_t rank, bool extend_parent)
     : _cl_sub_tensor(std::make_shared<arm_compute::CLSubTensor>(parent->handle(), tensor_shape,
-                                                                coords, extend_parent))
+                                                                coords, extend_parent)),
+      _rank{rank}
 {
   // DO NOTHING
 }
index ff15223..8eba376 100644 (file)
@@ -37,7 +37,10 @@ public:
 
 public:
   CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
-              const arm_compute::Coordinates &coords, bool extend_parent = false);
+              const arm_compute::Coordinates &coords, size_t rank, bool extend_parent = false);
+
+public:
+  size_t num_dimensions() const final { return _rank; }
 
 public:
   const arm_compute::CLSubTensor *handle() const override;
@@ -49,6 +52,7 @@ public:
 
 private:
   std::shared_ptr<arm_compute::CLSubTensor> _cl_sub_tensor;
+  size_t _rank;
 };
 
 } // namespace operand
index 4c78d31..6153fc2 100644 (file)
@@ -31,8 +31,8 @@ namespace acl_cl
 namespace operand
 {
 
-CLTensor::CLTensor(const arm_compute::TensorInfo &info)
-    : _cl_tensor(std::make_shared<arm_compute::CLTensor>())
+CLTensor::CLTensor(const arm_compute::TensorInfo &info, size_t rank)
+    : _cl_tensor(std::make_shared<arm_compute::CLTensor>()), _rank{rank}
 {
   allocator()->init(info);
 }
index d7ffdfa..9528516 100644 (file)
@@ -38,7 +38,10 @@ public:
   CLTensor() = delete;
 
 public:
-  CLTensor(const arm_compute::TensorInfo &info);
+  CLTensor(const arm_compute::TensorInfo &info, size_t rank);
+
+public:
+  size_t num_dimensions() const final { return _rank; }
 
 public:
   const arm_compute::CLTensor *handle() const override;
@@ -59,6 +62,7 @@ public:
 
 private:
   std::shared_ptr<arm_compute::CLTensor> _cl_tensor;
+  size_t _rank;
 };
 
 } // namespace operand
index 8003b0e..b7897f9 100644 (file)
@@ -69,9 +69,10 @@ public:
     return nullptr;
   }
 
-  void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info)
+  void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info,
+                   size_t rank)
   {
-    auto tensor = std::make_shared<T_Tensor>(info);
+    auto tensor = std::make_shared<T_Tensor>(info, rank);
     _tensors[ind] = tensor;
   }
 
index 47de18f..26ef071 100644 (file)
@@ -25,6 +25,21 @@ namespace backend
 namespace acl_common
 {
 
+size_t IACLTensor::num_dimensions() const
+{
+  throw std::runtime_error("No definition of num_dimensions()");
+  return 0;
+}
+
+size_t IACLTensor::dimension(size_t index) const
+{
+  // Assume that the front is the higher dimension.
+  // e.g. N: 0, C: 1, H: 2, W: 3 for NCHW layout
+  // NOTE Dimension correction must not have been applied to this tensor
+  const ARMComputeAxis reversed{(num_dimensions() - index) - 1};
+  return info()->dimension(reversed.value());
+}
+
 size_t IACLTensor::calcOffset(const neurun::util::Coordinates &coords)
 {
   const auto rank = coords.size();
index dd79c09..bfda841 100644 (file)
@@ -39,8 +39,8 @@ public:
 public:
   uint8_t *buffer() const final { return handle()->buffer(); }
   size_t total_size() const final { return info()->total_size(); }
-  size_t dimension(size_t index) const final { return info()->dimension(index); }
-  size_t num_dimensions() const final { return info()->num_dimensions(); }
+  size_t dimension(size_t index) const final;
+  size_t num_dimensions() const override;
   size_t calcOffset(const neurun::util::Coordinates &coords) final;
   model::Layout layout() const final;
   bool has_padding() const override { return info()->has_padding(); }
index 70842e9..6bb0e00 100644 (file)
@@ -321,7 +321,7 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildTensor
     const auto &backend_layout = _tensor_layouts_map[root_parent].second;
     auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), frontend_layout, backend_layout,
                                     _apply_dim_correction_map[ind]);
-    _mem_mgr->buildTensor(ind, tensor_info);
+    _mem_mgr->buildTensor(ind, tensor_info, info.shape().rank());
   }
 }
 
@@ -399,7 +399,8 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildSubten
                                  _apply_dim_correction_map[current]);
       ::arm_compute::Coordinates coordinates =
           asTensorCoordinate(info.offset(), frontend_layout, backend_layout);
-      auto tensor = std::make_shared<T_SubTensor>(parent_tensor.get(), shape, coordinates, true);
+      auto tensor = std::make_shared<T_SubTensor>(parent_tensor.get(), shape, coordinates,
+                                                  info.shape().rank(), true);
       subtensors[current] = tensor;
       stack.pop();
     }
index d321fec..8fab277 100644 (file)
@@ -35,7 +35,7 @@ inline bool matchSizeExceptAxis(const ::neurun::backend::acl_neon::operand::INET
   {
     if (axis == i)
       continue;
-    if (t1->dimension(i) != t2->dimension(i))
+    if (t1->info()->dimension(i) != t2->info()->dimension(i))
       return false;
   }
   return true;
@@ -70,10 +70,10 @@ template <typename T> bool ConcatLayer::concatenate()
     {
       assert(_output_alloc->layout() == input->layout());
       assert(matchSizeExceptAxis(_output_alloc, input, _axis));
-      axis_sum += input->dimension(_axis);
+      axis_sum += input->info()->dimension(_axis);
     }
 
-    assert(_output_alloc->dimension(_axis) == axis_sum);
+    assert(_output_alloc->info()->dimension(_axis) == axis_sum);
   }
 
   VERBOSE(Concat_RUN) << "START Concat" << std::endl;
index c8d4d2d..a36af60 100644 (file)
@@ -26,9 +26,10 @@ namespace operand
 {
 
 NESubTensor::NESubTensor(INETensor *parent, const arm_compute::TensorShape &tensor_shape,
-                         const arm_compute::Coordinates &coords, bool extend_parent)
+                         const arm_compute::Coordinates &coords, size_t rank, bool extend_parent)
     : _ne_sub_tensor(std::make_shared<arm_compute::SubTensor>(parent->handle(), tensor_shape,
-                                                              coords, extend_parent))
+                                                              coords, extend_parent)),
+      _rank{rank}
 {
   // DO NOTHING
 }
index 6a1338c..010e4de 100644 (file)
@@ -37,7 +37,10 @@ public:
 
 public:
   NESubTensor(INETensor *parent, const arm_compute::TensorShape &tensor_shape,
-              const arm_compute::Coordinates &coords, bool extend_parent = false);
+              const arm_compute::Coordinates &coords, size_t rank, bool extend_parent = false);
+
+public:
+  size_t num_dimensions() const final { return _rank; }
 
 public:
   const arm_compute::SubTensor *handle() const override;
@@ -49,6 +52,7 @@ public:
 
 private:
   std::shared_ptr<arm_compute::SubTensor> _ne_sub_tensor;
+  size_t _rank;
 };
 
 } // namespace operand
index ec424e5..756403e 100644 (file)
@@ -27,8 +27,8 @@ namespace acl_neon
 namespace operand
 {
 
-NETensor::NETensor(const arm_compute::TensorInfo &info)
-    : _ne_tensor(std::make_shared<arm_compute::Tensor>())
+NETensor::NETensor(const arm_compute::TensorInfo &info, size_t rank)
+    : _ne_tensor(std::make_shared<arm_compute::Tensor>()), _rank{rank}
 {
   allocator()->init(info);
 }
index c1a5b5b..298a820 100644 (file)
@@ -37,7 +37,10 @@ public:
   NETensor() = delete;
 
 public:
-  NETensor(const arm_compute::TensorInfo &info);
+  NETensor(const arm_compute::TensorInfo &info, size_t rank);
+
+public:
+  size_t num_dimensions() const final { return _rank; }
 
 public:
   const arm_compute::Tensor *handle() const override;
@@ -48,6 +51,7 @@ public:
 
 private:
   std::shared_ptr<arm_compute::Tensor> _ne_tensor;
+  size_t _rank;
 };
 
 } // namespace operand
index 08df946..37ee8e3 100644 (file)
@@ -39,11 +39,11 @@ template <typename T> class View final : public nnfw::misc::feature::Reader<T>
 public:
   View(::neurun::backend::operand::ITensor *tensor) : _tensor{tensor}
   {
-    // TODO Validate whether tensor is a feature map, or not
-    _shape.N = tensor->dimension(3);
-    _shape.C = tensor->dimension(2);
-    _shape.H = tensor->dimension(1);
-    _shape.W = tensor->dimension(0);
+    assert(tensor->num_dimensions() == 4 && tensor->layout() == model::Layout::NCHW);
+    _shape.N = tensor->dimension(0);
+    _shape.C = tensor->dimension(1);
+    _shape.H = tensor->dimension(2);
+    _shape.W = tensor->dimension(3);
   }
 
 public: