From a2dc5f991ce5a8f33052311e0dd49eccef44c16f Mon Sep 17 00:00:00 2001
From: SaeHie Park/동작제어Lab(SR)/Principal Engineer/Samsung Electronics
Date: Thu, 3 May 2018 15:57:44 +0900
Subject: [PATCH] Fix ACL softmax op for matrix input (#1020)

This fixes the ACL softmax op when it receives a matrix input
- adds a MatrixInputAccessor class to handle matrix input

Signed-off-by: SaeHie Park
---
 libs/kernel/acl/src/IO_accessor.cpp | 27 +++++++++++++++++++++++++++
 libs/kernel/acl/src/IO_accessor.h   | 14 ++++++++++++++
 libs/kernel/acl/src/cl/Softmax.cpp  |  5 ++---
 3 files changed, 43 insertions(+), 3 deletions(-)

diff --git a/libs/kernel/acl/src/IO_accessor.cpp b/libs/kernel/acl/src/IO_accessor.cpp
index ccbc5ce..410fb8e 100644
--- a/libs/kernel/acl/src/IO_accessor.cpp
+++ b/libs/kernel/acl/src/IO_accessor.cpp
@@ -28,6 +28,12 @@ InputAccessor::InputAccessor(const float* inputData, const nnfw::rt::Shape& inpu
 {
 }
 
+MatrixInputAccessor::MatrixInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape)
+  : _inputData(inputData)
+  , _inputShape(inputShape)
+{
+}
+
 VectorInputAccessor::VectorInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape)
   : _inputData(inputData)
   , _inputShape(inputShape)
@@ -147,6 +153,27 @@ bool InputAccessor::access_tensor(arm_compute::ITensor &tensor)
   return true;
 }
 
+bool MatrixInputAccessor::access_tensor(arm_compute::ITensor &tensor)
+{
+  arm_compute::Window window;
+  window.use_tensor_dimensions(tensor.info()->tensor_shape());
+
+  assert(tensor.info()->tensor_shape().num_dimensions() <= 2);
+
+  execute_window_loop(window, [&](const arm_compute::Coordinates& id)
+  {
+    const auto row = id[1];
+    const auto col = id[0];
+    const auto W = tensor.info()->tensor_shape().x();
+
+    const auto offset = row * W + col;
+
+    *reinterpret_cast<float *>(tensor.ptr_to_element(id)) =
+        *(_inputData + offset);
+  });
+  return true;
+}
+
 bool VectorInputAccessor::access_tensor(arm_compute::ITensor &tensor)
 {
   arm_compute::Window window;
diff --git a/libs/kernel/acl/src/IO_accessor.h b/libs/kernel/acl/src/IO_accessor.h
index 87837e5..e7670f1 100644
--- a/libs/kernel/acl/src/IO_accessor.h
+++ b/libs/kernel/acl/src/IO_accessor.h
@@ -41,6 +41,20 @@ private:
   const nnfw::rt::Shape& _inputShape;
 };
 
+class MatrixInputAccessor : public arm_compute::graph::ITensorAccessor
+{
+public:
+  MatrixInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape);
+  MatrixInputAccessor(MatrixInputAccessor&&) = default;
+
+  // Inherited methods overridden:
+  bool access_tensor(arm_compute::ITensor& tensor) override;
+
+private:
+  const float* _inputData;
+  const nnfw::rt::Shape& _inputShape;
+};
+
 class VectorInputAccessor : public arm_compute::graph::ITensorAccessor
 {
 public:
diff --git a/libs/kernel/acl/src/cl/Softmax.cpp b/libs/kernel/acl/src/cl/Softmax.cpp
index 00fd3ac..778347e 100644
--- a/libs/kernel/acl/src/cl/Softmax.cpp
+++ b/libs/kernel/acl/src/cl/Softmax.cpp
@@ -57,14 +57,13 @@ bool softmaxFloat32(const float* inputData, const nnfw::rt::Shape& inputShape,
   }
   else if (inputShape.dimensions.size() == 2)
   {
-    // Softmax comes with 1xN matrix and this is translated to N vector in arm_compute::TensorShape
-    TensorAccess<VectorInputAccessor>(input, inputData, inputShape);
+    TensorAccess<MatrixInputAccessor>(input, inputData, inputShape);
 
     softmax_f->run();
 
     arm_compute::CLScheduler::get().sync();
 
-    TensorAccess<VectorOutputAccessor>(output, outputData, outputShape);
+    TensorAccess<MatrixOutputAccessor>(output, outputData, outputShape);
   }
   else
   {
--
2.7.4
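
Reviewer note: for the 2-D case, Softmax.cpp now copies the result back through a matrix-aware output accessor, but this patch only adds MatrixInputAccessor, so the output-side accessor is assumed to exist already in IO_accessor.h/.cpp. As a sketch only, an output accessor mirroring the MatrixInputAccessor added above could look like the code below; the class name, constructor signature, and header paths are assumptions for illustration, not code taken from this patch or the repository.

  // Hypothetical sketch, not part of this patch: a matrix output accessor
  // mirroring MatrixInputAccessor, copying from the ACL tensor back into
  // the runtime's row-major float buffer.
  // Header paths follow the ACL version this repository builds against.
  #include <arm_compute/core/Helpers.h>   // execute_window_loop
  #include <arm_compute/core/ITensor.h>
  #include <arm_compute/core/Window.h>
  #include <arm_compute/graph/ITensorAccessor.h>
  #include <cassert>

  // Project type; forward-declared here only to keep the sketch self-contained.
  namespace nnfw { namespace rt { struct Shape; } }

  class MatrixOutputAccessor : public arm_compute::graph::ITensorAccessor
  {
  public:
    MatrixOutputAccessor(float* outputData, const nnfw::rt::Shape& outputShape)
      : _outputData(outputData)
      , _outputShape(outputShape)
    {
    }

    bool access_tensor(arm_compute::ITensor& tensor) override
    {
      arm_compute::Window window;
      window.use_tensor_dimensions(tensor.info()->tensor_shape());

      assert(tensor.info()->tensor_shape().num_dimensions() <= 2);

      // Same row-major mapping as MatrixInputAccessor, reversed direction:
      // read each (col, row) element of the ACL tensor and store it at
      // outputData[row * W + col].
      arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates& id)
      {
        const auto row = id[1];
        const auto col = id[0];
        const auto W = tensor.info()->tensor_shape().x();

        *(_outputData + row * W + col) =
            *reinterpret_cast<float*>(tensor.ptr_to_element(id));
      });
      return true;
    }

  private:
    float* _outputData;
    const nnfw::rt::Shape& _outputShape;
  };

The shape reference is kept only to mirror the input accessor's constructor; access_tensor itself derives the row width from the ACL tensor shape, exactly as the input accessor does.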
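
For reference, the mapping the new accessor depends on is plain row-major indexing between the runtime's flat float buffer and ACL coordinates, where id[0] is the column (x) and id[1] is the row (y). A tiny standalone illustration of that offset computation, with no ACL dependency (hypothetical 2x3 buffer):

  #include <cassert>
  #include <cstddef>
  #include <vector>

  int main()
  {
    // Row-major 2x3 buffer, as the NN runtime hands it to the accessor.
    const std::size_t H = 2, W = 3;
    const std::vector<float> data = {0.f, 1.f, 2.f,
                                     3.f, 4.f, 5.f};

    // MatrixInputAccessor reads element (row, col) from data[row * W + col].
    for (std::size_t row = 0; row < H; ++row)
      for (std::size_t col = 0; col < W; ++col)
        assert(data[row * W + col] == static_cast<float>(row * W + col));

    return 0;
  }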