#include <arm_compute/runtime/CL/functions/CLReductionMean.h>
#include "internal/arm_compute/Cast.h"
+#include "internal/arm_compute/matrix/View.h"
#include "internal/arm_compute/kernel/View.h"
+#include "internal/nnapi/matrix/Reader.h"
#include "internal/nnapi/kernel/Reader.h"
#include "internal/nnapi/feature/Reader.h"
#include "internal/nnapi/feature/View.h"
#include "internal/layers/SimpleArithmeticAddition.h"
#include "internal/layers/SimpleCastLayer.h"
+#include "util/matrix/IndexIterator.h"
#include "util/kernel/IndexIterator.h"
#include "util/feature/IndexIterator.h"
}
template <typename T>
+// Initializes a rank-2 (matrix) ACL tensor from an NN API operand's raw buffer.
+// Each (row, col) element is read through the nnapi matrix Reader and written
+// into the destination tensor through the arm_compute matrix View.
+//
+// @param tensor       destination ACL tensor (written element-wise)
+// @param matrix_shape 2-D shape of the operand (rows x cols)
+// @param matrix_base  pointer to the operand's raw data
+// @param matrix_size  size of the operand data passed to the Reader
+//                     (NOTE(review): presumably a byte count — confirm against
+//                     ::internal::nnapi::matrix::Reader's contract)
+static void initMatrixTensor(::arm_compute::ITensor &tensor,
+ const nnfw::util::matrix::Shape &matrix_shape,
+ const uint8_t *matrix_base, const size_t matrix_size)
+{
+ const ::internal::nnapi::matrix::Reader<T> from{matrix_shape, matrix_base, matrix_size};
+ ::internal::arm_compute::matrix::View<T> into{&tensor};
+
+ // Visit every index of the matrix and copy the value across, one element
+ // at a time (mirrors the sibling initKernelTensor/feature initializers).
+ ::nnfw::util::matrix::iterate(matrix_shape) << [&](uint32_t row, uint32_t col) {
+ const auto value = from.at(row, col);
+ into.at(row, col) = value;
+ };
+}
+
+template <typename T>
static void initKernelTensor(::arm_compute::ITensor &tensor,
const nnfw::util::kernel::Shape &kernel_shape,
const uint8_t *kernel_base, const size_t kernel_size)
}
case 2: // matrix
{
- // use workaround: work like vector initialization
- auto operand_rows = shape.dim(0);
- auto operand_cols = shape.dim(1);
- auto size = operand_rows * operand_cols;
+ const auto matrix_shape = shape.asMatrix();
+ auto size = operands.at(operand_idx).data().size();
switch (type)
{
case ANEURALNETWORKS_TENSOR_FLOAT32:
{
- auto initializer = std::bind(initVectorTensor<float>, _1, base, size);
+ auto initializer = std::bind(initMatrixTensor<float>, _1, matrix_shape, base, size);
_plan.operands().at(operand_idx).access(initializer);
break;
}
case ANEURALNETWORKS_TENSOR_INT32:
{
- auto initializer = std::bind(initVectorTensor<int32_t>, _1, base, size);
+ auto initializer = std::bind(initMatrixTensor<int32_t>, _1, matrix_shape, base, size);
_plan.operands().at(operand_idx).access(initializer);
break;
}
case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
{
- auto initializer = std::bind(initVectorTensor<uint8_t>, _1, base, size);
+ auto initializer = std::bind(initMatrixTensor<uint8_t>, _1, matrix_shape, base, size);
_plan.operands().at(operand_idx).access(initializer);
break;
}