From 303338a58cec1533650c7349601a9d3366a77c1c Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EA=B9=80=EC=9A=A9=EC=84=AD/=EB=8F=99=EC=9E=91=EC=A0=9C?= =?utf8?q?=EC=96=B4Lab=28SR=29/Engineer/=EC=82=BC=EC=84=B1=EC=A0=84?= =?utf8?q?=EC=9E=90?= Date: Thu, 16 Aug 2018 17:33:41 +0900 Subject: [PATCH] Support initializing constant matrix(2d type) at compilation time (#2307) - while initializing matrix at execute time as input/output is supported, initializing constant matrix(2d type) at compilation time is not done so far in nnfw. - initialize constant matrix by using matrix's View, Reader and Iterator classes at compile time. Signed-off-by: Yongseop Kim --- runtimes/pure_arm_compute/src/compilation.cc | 29 +++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/runtimes/pure_arm_compute/src/compilation.cc b/runtimes/pure_arm_compute/src/compilation.cc index a3c9354..09beb35 100644 --- a/runtimes/pure_arm_compute/src/compilation.cc +++ b/runtimes/pure_arm_compute/src/compilation.cc @@ -25,7 +25,9 @@ #include #include "internal/arm_compute/Cast.h" +#include "internal/arm_compute/matrix/View.h" #include "internal/arm_compute/kernel/View.h" +#include "internal/nnapi/matrix/Reader.h" #include "internal/nnapi/kernel/Reader.h" #include "internal/nnapi/feature/Reader.h" #include "internal/nnapi/feature/View.h" @@ -34,6 +36,7 @@ #include "internal/layers/SimpleArithmeticAddition.h" #include "internal/layers/SimpleCastLayer.h" +#include "util/matrix/IndexIterator.h" #include "util/kernel/IndexIterator.h" #include "util/feature/IndexIterator.h" @@ -190,6 +193,20 @@ static void initVectorTensor(::arm_compute::ITensor &tensor, const uint8_t *vec_ } template <typename T> +static void initMatrixTensor(::arm_compute::ITensor &tensor, + const nnfw::util::matrix::Shape &matrix_shape, + const uint8_t *matrix_base, const size_t matrix_size) +{ + const ::internal::nnapi::matrix::Reader<T> from{matrix_shape, matrix_base, matrix_size}; + ::internal::arm_compute::matrix::View<T> into{&tensor}; + + ::nnfw::util::matrix::iterate(matrix_shape) << [&](uint32_t row, uint32_t col) { + const auto value = from.at(row, col); + into.at(row, col) = value; + }; +} + +template <typename T> static void initKernelTensor(::arm_compute::ITensor &tensor, const nnfw::util::kernel::Shape &kernel_shape, const uint8_t *kernel_base, const size_t kernel_size) @@ -3078,27 +3095,25 @@ void PlanBuilder::finalize(void) const } case 2: // matrix { - // use workaround: work like vector initialization - auto operand_rows = shape.dim(0); - auto operand_cols = shape.dim(1); - auto size = operand_rows * operand_cols; + const auto matrix_shape = shape.asMatrix(); + auto size = operands.at(operand_idx).data().size(); switch (type) { case ANEURALNETWORKS_TENSOR_FLOAT32: { - auto initializer = std::bind(initVectorTensor<float>, _1, base, size); + auto initializer = std::bind(initMatrixTensor<float>, _1, matrix_shape, base, size); _plan.operands().at(operand_idx).access(initializer); break; } case ANEURALNETWORKS_TENSOR_INT32: { - auto initializer = std::bind(initVectorTensor<int32_t>, _1, base, size); + auto initializer = std::bind(initMatrixTensor<int32_t>, _1, matrix_shape, base, size); _plan.operands().at(operand_idx).access(initializer); break; } case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM: { - auto initializer = std::bind(initVectorTensor<uint8_t>, _1, base, size); + auto initializer = std::bind(initMatrixTensor<uint8_t>, _1, matrix_shape, base, size); _plan.operands().at(operand_idx).access(initializer); break; } -- 2.7.4