From: 서상민/동작제어Lab(SR)/Senior Engineer/삼성전자
Date: Fri, 4 May 2018 00:16:28 +0000 (+0900)
Subject: [Tizen5.0-M1] Fix softmax of ACL op for matrix input (#1066)
X-Git-Tag: 0.1~22
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a350bf7155d838872e2db66579eec45d7c38be13;p=platform%2Fcore%2Fml%2Fnnfw.git

[Tizen5.0-M1] Fix softmax of ACL op for matrix input (#1066)

Cherry-picked a2dc5f991c (original PR: #1020)

This fixes the ACL softmax op when its input is a matrix:
- adds a MatrixInputAccessor class to handle matrix input

Signed-off-by: SaeHie Park
---

diff --git a/libs/kernel/acl/src/IO_accessor.cpp b/libs/kernel/acl/src/IO_accessor.cpp
index ccbc5ce..410fb8e 100644
--- a/libs/kernel/acl/src/IO_accessor.cpp
+++ b/libs/kernel/acl/src/IO_accessor.cpp
@@ -28,6 +28,12 @@ InputAccessor::InputAccessor(const float* inputData, const nnfw::rt::Shape& inpu
 {
 }

+MatrixInputAccessor::MatrixInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape)
+  : _inputData(inputData)
+  , _inputShape(inputShape)
+{
+}
+
 VectorInputAccessor::VectorInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape)
   : _inputData(inputData)
   , _inputShape(inputShape)
@@ -147,6 +153,27 @@ bool InputAccessor::access_tensor(arm_compute::ITensor &tensor)
   return true;
 }

+bool MatrixInputAccessor::access_tensor(arm_compute::ITensor &tensor)
+{
+  arm_compute::Window window;
+  window.use_tensor_dimensions(tensor.info()->tensor_shape());
+
+  assert(tensor.info()->tensor_shape().num_dimensions() <= 2);
+
+  execute_window_loop(window, [&](const arm_compute::Coordinates& id)
+  {
+    const auto row = id[1];
+    const auto col = id[0];
+    const auto W = tensor.info()->tensor_shape().x();
+
+    const auto offset = row * W + col;
+
+    *reinterpret_cast<float*>(tensor.ptr_to_element(id)) =
+        *(_inputData + offset);
+  });
+  return true;
+}
+
 bool VectorInputAccessor::access_tensor(arm_compute::ITensor &tensor)
 {
   arm_compute::Window window;
diff --git a/libs/kernel/acl/src/IO_accessor.h b/libs/kernel/acl/src/IO_accessor.h
index 87837e5..e7670f1 100644
--- a/libs/kernel/acl/src/IO_accessor.h
+++ b/libs/kernel/acl/src/IO_accessor.h
@@ -41,6 +41,20 @@ private:
   const nnfw::rt::Shape& _inputShape;
 };

+class MatrixInputAccessor : public arm_compute::graph::ITensorAccessor
+{
+public:
+  MatrixInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape);
+  MatrixInputAccessor(MatrixInputAccessor&&) = default;
+
+  // Inherited methods overriden:
+  bool access_tensor(arm_compute::ITensor& tensor) override;
+
+private:
+  const float* _inputData;
+  const nnfw::rt::Shape& _inputShape;
+};
+
 class VectorInputAccessor : public arm_compute::graph::ITensorAccessor
 {
 public:
diff --git a/libs/kernel/acl/src/cl/Softmax.cpp b/libs/kernel/acl/src/cl/Softmax.cpp
index 00fd3ac..778347e 100644
--- a/libs/kernel/acl/src/cl/Softmax.cpp
+++ b/libs/kernel/acl/src/cl/Softmax.cpp
@@ -57,14 +57,13 @@ bool softmaxFloat32(const float* inputData, const nnfw::rt::Shape& inputShape,
   }
   else if (inputShape.dimensions.size() == 2)
   {
-    // Softmax comes with 1xN matrix and this is translated to N vector in arm_compute::TensorShape
-    TensorAccess<VectorInputAccessor>(input, inputData, inputShape);
+    TensorAccess<MatrixInputAccessor>(input, inputData, inputShape);

     softmax_f->run();

     arm_compute::CLScheduler::get().sync();

-    TensorAccess<VectorOutputAccessor>(output, outputData, outputShape);
+    TensorAccess<MatrixOutputAccessor>(output, outputData, outputShape);
   }
   else
   {
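
For reference, below is a minimal standalone sketch (plain C++, no Arm Compute Library dependency) of the row-major indexing that the new MatrixInputAccessor::access_tensor relies on, i.e. offset = row * W + col over a 2-D window; the fill_matrix helper and the H/W parameter names are illustrative only and not part of the patch.

// Standalone sketch (no Arm Compute Library dependency) of the row-major
// indexing used by MatrixInputAccessor::access_tensor above.  The
// fill_matrix helper and the H/W parameter names are illustrative only.
#include <cassert>
#include <cstddef>
#include <vector>

// Visit every (row, col) coordinate of an H x W destination matrix, the way
// execute_window_loop visits a 2-D tensor (id[0] = column, id[1] = row), and
// copy the matching element from a flat row-major source buffer.
void fill_matrix(const float* inputData, std::size_t H, std::size_t W,
                 std::vector<std::vector<float>>& dest)
{
  dest.assign(H, std::vector<float>(W, 0.0f));
  for (std::size_t row = 0; row < H; ++row)
  {
    for (std::size_t col = 0; col < W; ++col)
    {
      const std::size_t offset = row * W + col; // same formula as in the patch
      dest[row][col] = *(inputData + offset);
    }
  }
}

int main()
{
  // 2 x 3 input, stored row-major (last dimension contiguous).
  const float src[] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
  std::vector<std::vector<float>> dst;
  fill_matrix(src, 2, 3, dst);
  assert(dst[0][2] == 3.f); // row 0, col 2
  assert(dst[1][0] == 4.f); // row 1, col 0
  return 0;
}

Any C++11 compiler can build and run this; the asserts check that element (row, col) of the destination matches the flat row-major source, which is the mapping the accessor performs when filling the ACL tensor.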