From: Yongseop Kim/Motion Control Lab (SR)/Engineer/Samsung Electronics
Date: Thu, 16 Aug 2018 08:33:41 +0000 (+0900)
Subject: Support initializing constant matrix(2d type) at compilation time (#2307)
X-Git-Tag: 0.2~262
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=303338a58cec1533650c7349601a9d3366a77c1c;p=platform%2Fcore%2Fml%2Fnnfw.git

Support initializing constant matrix(2d type) at compilation time (#2307)

- While initializing a matrix as an input/output at execution time is
  already supported, nnfw has so far not supported initializing a
  constant matrix (2D type) at compilation time.
- Initialize constant matrices at compilation time using the matrix
  View, Reader, and IndexIterator classes.

Signed-off-by: Yongseop Kim
---

diff --git a/runtimes/pure_arm_compute/src/compilation.cc b/runtimes/pure_arm_compute/src/compilation.cc
index a3c9354..09beb35 100644
--- a/runtimes/pure_arm_compute/src/compilation.cc
+++ b/runtimes/pure_arm_compute/src/compilation.cc
@@ -25,7 +25,9 @@
 #include

 #include "internal/arm_compute/Cast.h"
+#include "internal/arm_compute/matrix/View.h"
 #include "internal/arm_compute/kernel/View.h"
+#include "internal/nnapi/matrix/Reader.h"
 #include "internal/nnapi/kernel/Reader.h"
 #include "internal/nnapi/feature/Reader.h"
 #include "internal/nnapi/feature/View.h"
@@ -34,6 +36,7 @@
 #include "internal/layers/SimpleArithmeticAddition.h"
 #include "internal/layers/SimpleCastLayer.h"

+#include "util/matrix/IndexIterator.h"
 #include "util/kernel/IndexIterator.h"
 #include "util/feature/IndexIterator.h"

@@ -190,6 +193,20 @@ static void initVectorTensor(::arm_compute::ITensor &tensor, const uint8_t *vec_
 }

 template <typename T>
+static void initMatrixTensor(::arm_compute::ITensor &tensor,
+                             const nnfw::util::matrix::Shape &matrix_shape,
+                             const uint8_t *matrix_base, const size_t matrix_size)
+{
+  const ::internal::nnapi::matrix::Reader<T> from{matrix_shape, matrix_base, matrix_size};
+  ::internal::arm_compute::matrix::View<T> into{&tensor};
+
+  ::nnfw::util::matrix::iterate(matrix_shape) << [&](uint32_t row, uint32_t col) {
+    const auto value = from.at(row, col);
+    into.at(row, col) = value;
+  };
+}
+
+template <typename T>
 static void initKernelTensor(::arm_compute::ITensor &tensor,
                              const nnfw::util::kernel::Shape &kernel_shape,
                              const uint8_t *kernel_base, const size_t kernel_size)
@@ -3078,27 +3095,25 @@ void PlanBuilder::finalize(void) const
       }
       case 2: // matrix
       {
-        // use workaround: work like vector initialization
-        auto operand_rows = shape.dim(0);
-        auto operand_cols = shape.dim(1);
-        auto size = operand_rows * operand_cols;
+        const auto matrix_shape = shape.asMatrix();
+        auto size = operands.at(operand_idx).data().size();
         switch (type)
         {
           case ANEURALNETWORKS_TENSOR_FLOAT32:
           {
-            auto initializer = std::bind(initVectorTensor<float>, _1, base, size);
+            auto initializer = std::bind(initMatrixTensor<float>, _1, matrix_shape, base, size);
             _plan.operands().at(operand_idx).access(initializer);
             break;
           }
           case ANEURALNETWORKS_TENSOR_INT32:
           {
-            auto initializer = std::bind(initVectorTensor<int32_t>, _1, base, size);
+            auto initializer = std::bind(initMatrixTensor<int32_t>, _1, matrix_shape, base, size);
             _plan.operands().at(operand_idx).access(initializer);
             break;
           }
           case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
           {
-            auto initializer = std::bind(initVectorTensor<uint8_t>, _1, base, size);
+            auto initializer = std::bind(initMatrixTensor<uint8_t>, _1, matrix_shape, base, size);
             _plan.operands().at(operand_idx).access(initializer);
             break;
           }
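
For readers unfamiliar with the pattern above, here is a standalone sketch of what initMatrixTensor does: copy a constant operand's raw buffer into a destination tensor element by element, through a Reader (source accessor), a View (destination accessor), and a row/column IndexIterator. The Shape, Reader, View, and iterate definitions below are simplified stand-ins written only for illustration; the real ones live in the headers this patch adds (internal/nnapi/matrix/Reader.h, internal/arm_compute/matrix/View.h, util/matrix/IndexIterator.h), and the real View wraps an ::arm_compute::ITensor rather than a plain buffer.

// Illustrative sketch only -- simplified stand-ins for the nnfw matrix
// Reader/View/IndexIterator helpers used by initMatrixTensor above.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

namespace sketch
{

struct Shape
{
  uint32_t H; // number of rows
  uint32_t W; // number of columns
};

// Read-only accessor over the constant operand's raw buffer (stand-in for
// ::internal::nnapi::matrix::Reader<T>).
template <typename T> class Reader
{
public:
  Reader(const Shape &shape, const uint8_t *base, size_t size)
      : _shape{shape}, _base{reinterpret_cast<const T *>(base)}
  {
    assert(size == _shape.H * _shape.W * sizeof(T));
  }

  T at(uint32_t row, uint32_t col) const { return _base[row * _shape.W + col]; }

private:
  Shape _shape;
  const T *_base;
};

// Writable accessor over the destination buffer (stand-in for
// ::internal::arm_compute::matrix::View<T>, which wraps an ITensor).
template <typename T> class View
{
public:
  View(T *base, const Shape &shape) : _base{base}, _shape{shape} {}

  T &at(uint32_t row, uint32_t col) { return _base[row * _shape.W + col]; }

private:
  T *_base;
  Shape _shape;
};

// Stand-in for nnfw::util::matrix::iterate(shape) << callback, which visits
// every (row, col) index of the matrix.
struct IndexIterator
{
  Shape shape;
};

inline IndexIterator iterate(const Shape &shape) { return IndexIterator{shape}; }

template <typename Callback> IndexIterator operator<<(IndexIterator it, Callback cb)
{
  for (uint32_t row = 0; row < it.shape.H; ++row)
    for (uint32_t col = 0; col < it.shape.W; ++col)
      cb(row, col);
  return it;
}

} // namespace sketch

int main()
{
  using namespace sketch;

  const Shape shape{2, 3};
  const float constant[] = {1, 2, 3, 4, 5, 6}; // constant operand data
  std::vector<float> tensor(6, 0.0f);          // destination "tensor" buffer

  const Reader<float> from{shape, reinterpret_cast<const uint8_t *>(constant), sizeof(constant)};
  View<float> into{tensor.data(), shape};

  // Same loop shape as initMatrixTensor in the diff above.
  iterate(shape) << [&](uint32_t row, uint32_t col) { into.at(row, col) = from.at(row, col); };

  std::cout << tensor[4] << std::endl; // prints 5 (row 1, col 1)
  return 0;
}

Binding the element type at the call site (initMatrixTensor<float>, <int32_t>, <uint8_t>) is what lets a single template serve all three NNAPI tensor types that finalize() dispatches on.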