[Tizen5.0-M1] Fix softmax of ACL op for matrix input (#1066)
author서상민/동작제어Lab(SR)/Senior Engineer/삼성전자 <sangmin7.seo@samsung.com>
Fri, 4 May 2018 00:16:28 +0000 (09:16 +0900)
committer이성재/동작제어Lab(SR)/Principal Engineer/삼성전자 <sj925.lee@samsung.com>
Fri, 4 May 2018 00:16:28 +0000 (09:16 +0900)
Cherry-picked a2dc5f991c (original PR: #1020)

This fixes the ACL softmax op when it is given a matrix (2-D) input
- This adds a MatrixInputAccessor class to handle matrix input

Signed-off-by: SaeHie Park <saehie.park@samsung.com>
libs/kernel/acl/src/IO_accessor.cpp
libs/kernel/acl/src/IO_accessor.h
libs/kernel/acl/src/cl/Softmax.cpp

index ccbc5ce..410fb8e 100644 (file)
@@ -28,6 +28,12 @@ InputAccessor::InputAccessor(const float* inputData, const nnfw::rt::Shape& inpu
 {
 }
 
+MatrixInputAccessor::MatrixInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape)
+  : _inputData(inputData)
+  , _inputShape(inputShape)
+{
+}
+
 VectorInputAccessor::VectorInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape)
   : _inputData(inputData)
   , _inputShape(inputShape)
@@ -147,6 +153,27 @@ bool InputAccessor::access_tensor(arm_compute::ITensor &tensor)
   return true;
 }
 
+bool MatrixInputAccessor::access_tensor(arm_compute::ITensor &tensor)
+{
+  arm_compute::Window window;
+  window.use_tensor_dimensions(tensor.info()->tensor_shape());
+
+  assert(tensor.info()->tensor_shape().num_dimensions() <= 2);
+
+  execute_window_loop(window, [&](const arm_compute::Coordinates& id)
+  {
+    const auto row = id[1];
+    const auto col = id[0];
+    const auto W = tensor.info()->tensor_shape().x();
+
+    const auto offset = row * W + col;
+
+    *reinterpret_cast<float *>(tensor.ptr_to_element(id)) =
+        *(_inputData + offset);
+  });
+  return true;
+}
+
 bool VectorInputAccessor::access_tensor(arm_compute::ITensor &tensor)
 {
   arm_compute::Window window;
index 87837e5..e7670f1 100644 (file)
@@ -41,6 +41,20 @@ private:
     const nnfw::rt::Shape& _inputShape;
 };
 
+class MatrixInputAccessor : public arm_compute::graph::ITensorAccessor
+{
+public:
+    MatrixInputAccessor(const float* inputData, const nnfw::rt::Shape& inputShape);
+    MatrixInputAccessor(MatrixInputAccessor&&) = default;
+
+    // Inherited methods overriden:
+    bool access_tensor(arm_compute::ITensor& tensor) override;
+
+private:
+    const float* _inputData;
+    const nnfw::rt::Shape& _inputShape;
+};
+
 class VectorInputAccessor : public arm_compute::graph::ITensorAccessor
 {
 public:
index 00fd3ac..778347e 100644 (file)
@@ -57,14 +57,13 @@ bool softmaxFloat32(const float* inputData, const nnfw::rt::Shape& inputShape,
   }
   else if (inputShape.dimensions.size() == 2)
   {
-    // Softmax comes with 1xN matrix and this is translated to N vector in arm_compute::TensorShape
-    TensorAccess<VectorInputAccessor>(input, inputData, inputShape);
+    TensorAccess<MatrixInputAccessor>(input, inputData, inputShape);
 
     softmax_f->run();
 
     arm_compute::CLScheduler::get().sync();
 
-    TensorAccess<VectorOutputAccessor>(output, outputData, outputShape);
+    TensorAccess<MatrixOutputAccessor>(output, outputData, outputShape);
   }
   else
   {