Support initializing constant matrix (2D type) at compilation time (#2307)
author김용섭/동작제어Lab(SR)/Engineer/삼성전자 <yons.kim@samsung.com>
Thu, 16 Aug 2018 08:33:41 +0000 (17:33 +0900)
committer이춘석/동작제어Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Thu, 16 Aug 2018 08:33:41 +0000 (17:33 +0900)
- While initializing a matrix at execution time as input/output
is supported, initializing a constant matrix (2D type) at
compilation time has not been supported so far in nnfw.
- Initialize a constant matrix at compile time by using the matrix's
View, Reader and Iterator classes.

Signed-off-by: Yongseop Kim <yons.kim@samsung.com>
runtimes/pure_arm_compute/src/compilation.cc

index a3c9354..09beb35 100644 (file)
@@ -25,7 +25,9 @@
 #include <arm_compute/runtime/CL/functions/CLReductionMean.h>
 
 #include "internal/arm_compute/Cast.h"
+#include "internal/arm_compute/matrix/View.h"
 #include "internal/arm_compute/kernel/View.h"
+#include "internal/nnapi/matrix/Reader.h"
 #include "internal/nnapi/kernel/Reader.h"
 #include "internal/nnapi/feature/Reader.h"
 #include "internal/nnapi/feature/View.h"
@@ -34,6 +36,7 @@
 #include "internal/layers/SimpleArithmeticAddition.h"
 #include "internal/layers/SimpleCastLayer.h"
 
+#include "util/matrix/IndexIterator.h"
 #include "util/kernel/IndexIterator.h"
 #include "util/feature/IndexIterator.h"
 
@@ -190,6 +193,20 @@ static void initVectorTensor(::arm_compute::ITensor &tensor, const uint8_t *vec_
 }
 
 template <typename T>
+static void initMatrixTensor(::arm_compute::ITensor &tensor,
+                             const nnfw::util::matrix::Shape &matrix_shape,
+                             const uint8_t *matrix_base, const size_t matrix_size)
+{
+  const ::internal::nnapi::matrix::Reader<T> from{matrix_shape, matrix_base, matrix_size};
+  ::internal::arm_compute::matrix::View<T> into{&tensor};
+
+  ::nnfw::util::matrix::iterate(matrix_shape) << [&](uint32_t row, uint32_t col) {
+    const auto value = from.at(row, col);
+    into.at(row, col) = value;
+  };
+}
+
+template <typename T>
 static void initKernelTensor(::arm_compute::ITensor &tensor,
                              const nnfw::util::kernel::Shape &kernel_shape,
                              const uint8_t *kernel_base, const size_t kernel_size)
@@ -3078,27 +3095,25 @@ void PlanBuilder::finalize(void) const
         }
         case 2: // matrix
         {
-          // use workaround: work like vector initialization
-          auto operand_rows = shape.dim(0);
-          auto operand_cols = shape.dim(1);
-          auto size = operand_rows * operand_cols;
+          const auto matrix_shape = shape.asMatrix();
+          auto size = operands.at(operand_idx).data().size();
           switch (type)
           {
             case ANEURALNETWORKS_TENSOR_FLOAT32:
             {
-              auto initializer = std::bind(initVectorTensor<float>, _1, base, size);
+              auto initializer = std::bind(initMatrixTensor<float>, _1, matrix_shape, base, size);
               _plan.operands().at(operand_idx).access(initializer);
               break;
             }
             case ANEURALNETWORKS_TENSOR_INT32:
             {
-              auto initializer = std::bind(initVectorTensor<int32_t>, _1, base, size);
+              auto initializer = std::bind(initMatrixTensor<int32_t>, _1, matrix_shape, base, size);
               _plan.operands().at(operand_idx).access(initializer);
               break;
             }
             case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
             {
-              auto initializer = std::bind(initVectorTensor<uint8_t>, _1, base, size);
+              auto initializer = std::bind(initMatrixTensor<uint8_t>, _1, matrix_shape, base, size);
               _plan.operands().at(operand_idx).access(initializer);
               break;
             }