IVGCVSW-3846 Add NEON GATHER Workload
author    Teresa Charlin <teresa.charlinreyes@arm.com>
Fri, 10 Apr 2020 18:24:55 +0000 (19:24 +0100)
committer TeresaARM <teresa.charlinreyes@arm.com>
Fri, 29 May 2020 16:21:58 +0000 (16:21 +0000)
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I1a66fdad63cef16866d9dfcb8a339647f856e1d4

src/backends/aclCommon/ArmComputeUtils.hpp
src/backends/neon/NeonLayerSupport.cpp
src/backends/neon/NeonLayerSupport.hpp
src/backends/neon/NeonWorkloadFactory.cpp
src/backends/neon/backend.mk
src/backends/neon/test/NeonLayerTests.cpp
src/backends/neon/workloads/CMakeLists.txt
src/backends/neon/workloads/NeonConstantWorkload.cpp
src/backends/neon/workloads/NeonGatherWorkload.cpp [new file with mode: 0644]
src/backends/neon/workloads/NeonGatherWorkload.hpp [new file with mode: 0644]
src/backends/neon/workloads/NeonWorkloads.hpp

index d8818ce..0ee13b3 100644 (file)
@@ -186,4 +186,19 @@ inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor&
     return splitAxis;
 }
 
+/// Function to convert an ArmNN axis (counted left to right) to the equivalent ACL axis (counted right to left); valid axes range over [-dim, dim)
+inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
+{
+    int dim = static_cast<int>(tensor.GetNumDimensions());
+
+    ARMNN_ASSERT(dim != 0);
+    ARMNN_ASSERT((-1 * dim) <= armnnAxis);
+    ARMNN_ASSERT(armnnAxis < dim);
+
+    int sign = (armnnAxis < 0) ? -1 : 1;
+    int aclAxis = sign * dim - 1 - armnnAxis;
+
+    return aclAxis;
+}
+
 } // namespace armnn
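
For context, a minimal standalone sketch of the mapping that ComputeAclAxis implements (not part of the patch; the dimension count of 4 and the sample axes are chosen purely for illustration):

#include <cassert>
#include <iostream>

// Same formula as ComputeAclAxis above, with the TensorInfo argument
// replaced by a plain dimension count so the snippet is self-contained.
int ComputeAclAxisSketch(int armnnAxis, int dim)
{
    assert(dim != 0 && -dim <= armnnAxis && armnnAxis < dim);
    int sign = (armnnAxis < 0) ? -1 : 1;
    return sign * dim - 1 - armnnAxis;
}

int main()
{
    // With 4 dimensions (e.g. N,C,H,W in ArmNN order):
    std::cout << ComputeAclAxisSketch(0, 4)  << "\n";  // ArmNN axis  0 (N) -> ACL axis  3
    std::cout << ComputeAclAxisSketch(3, 4)  << "\n";  // ArmNN axis  3 (W) -> ACL axis  0
    std::cout << ComputeAclAxisSketch(-1, 4) << "\n";  // ArmNN axis -1 (W) -> ACL axis -4
    return 0;
}
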
index 53d0f0b..a514c8f 100644 (file)
@@ -44,6 +44,7 @@
 #include "workloads/NeonNegWorkload.hpp"
 #include "workloads/NeonNormalizationFloatWorkload.hpp"
 #include "workloads/NeonFullyConnectedWorkload.hpp"
+#include "workloads/NeonGatherWorkload.hpp"
 #include "workloads/NeonPadWorkload.hpp"
 #include "workloads/NeonPermuteWorkload.hpp"
 #include "workloads/NeonPooling2dWorkload.hpp"
@@ -435,6 +436,18 @@ bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                    descriptor);
 }
 
+bool NeonLayerSupport::IsGatherSupported(const TensorInfo& input0,
+                                         const TensorInfo& input1,
+                                         const TensorInfo& output,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
+}
+
 bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
                                           const armnn::TensorInfo& input1,
                                           const armnn::TensorInfo& output,
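
A hedged sketch of how the new support check might be queried in isolation (the header path, default construction of NeonLayerSupport, and the tensor shapes are assumptions made for illustration; production callers reach this through the backend's layer-support handle):

#include <neon/NeonLayerSupport.hpp>   // internal header; assumes src/backends is on the include path
#include <armnn/Tensor.hpp>
#include <armnn/Optional.hpp>

#include <iostream>
#include <string>

int main()
{
    // Gather along axis 0: params {3,4} indexed by two S32 indices -> output {2,4}.
    armnn::TensorInfo params ({ 3, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo indices({ 2 },    armnn::DataType::Signed32);
    armnn::TensorInfo output ({ 2, 4 }, armnn::DataType::Float32);

    std::string reason;
    armnn::NeonLayerSupport layerSupport;
    bool supported = layerSupport.IsGatherSupported(params, indices, output,
                                                    armnn::Optional<std::string&>(reason));
    std::cout << (supported ? "Gather supported" : reason) << std::endl;
    return 0;
}
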
index adb1891..2581782 100644 (file)
@@ -123,6 +123,11 @@ public:
                                    const FullyConnectedDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsGatherSupported(const TensorInfo& input0,
+                           const TensorInfo& input1,
+                           const TensorInfo& output,
+                           Optional<std::string&> reasonIfUnsupported) const override;
+
     ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
     bool IsGreaterSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
index ee0e703..35082f7 100644 (file)
@@ -280,7 +280,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFullyConnected(
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
                                                              const armnn::WorkloadInfo& info) const
 {
-    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+    return std::make_unique<NeonGatherWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
index 225687f..9e9c38c 100644 (file)
@@ -42,6 +42,7 @@ BACKEND_SOURCES := \
         workloads/NeonExpWorkload.cpp \
         workloads/NeonFloorFloatWorkload.cpp \
         workloads/NeonFullyConnectedWorkload.cpp \
+        workloads/NeonGatherWorkload.cpp \
         workloads/NeonInstanceNormalizationWorkload.cpp \
         workloads/NeonL2NormalizationFloatWorkload.cpp \
         workloads/NeonLstmFloatWorkload.cpp \
index 0e0558b..1ac2c61 100644 (file)
@@ -673,6 +673,12 @@ ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefault
 // Floor
 ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
 
+// Gather
+ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE(Gather1dParamsUint8, Gather1dParamsUint8Test)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
+
 // Equal
 ARMNN_AUTO_TEST_CASE(EqualSimple,            EqualSimpleTest)
 ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest)
index 6b3fe67..685d75b 100644 (file)
@@ -46,6 +46,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonFloorFloatWorkload.hpp
     NeonFullyConnectedWorkload.cpp
     NeonFullyConnectedWorkload.hpp
+    NeonGatherWorkload.cpp
+    NeonGatherWorkload.hpp
     NeonInstanceNormalizationWorkload.cpp
     NeonInstanceNormalizationWorkload.hpp
     NeonL2NormalizationFloatWorkload.cpp
index f7c8a73..05fdcf2 100644 (file)
@@ -23,7 +23,7 @@ arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
 {
     const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    std::array<arm_compute::DataType,8> supportedTypes = {
+    std::array<arm_compute::DataType,9> supportedTypes = {
             arm_compute::DataType::BFLOAT16,
             arm_compute::DataType::F16,
             arm_compute::DataType::F32,
@@ -31,7 +31,8 @@ arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
             arm_compute::DataType::QASYMM8_SIGNED,
             arm_compute::DataType::QSYMM16,
             arm_compute::DataType::QSYMM8,
-            arm_compute::DataType::QSYMM8_PER_CHANNEL
+            arm_compute::DataType::QSYMM8_PER_CHANNEL,
+            arm_compute::DataType::S32
     };
     auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());
 
@@ -110,6 +111,11 @@ void NeonConstantWorkload::Execute() const
                 CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
                 break;
             }
+            case arm_compute::DataType::S32:
+            {
+                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int32_t>(), output);
+                break;
+            }
             default:
             {
                 ARMNN_ASSERT_MSG(false, "Unknown data type");
diff --git a/src/backends/neon/workloads/NeonGatherWorkload.cpp b/src/backends/neon/workloads/NeonGatherWorkload.cpp
new file mode 100644 (file)
index 0000000..2e7c741
--- /dev/null
@@ -0,0 +1,46 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonGatherWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
+namespace armnn
+{
+arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& indices,
+                                               const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput   = BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclIndices = BuildArmComputeTensorInfo(indices);
+    const arm_compute::TensorInfo aclOutput  = BuildArmComputeTensorInfo(output);
+
+    int aclAxis = ComputeAclAxis(0, input);
+
+    return arm_compute::NEGather::validate(&aclInput, &aclIndices, &aclOutput, aclAxis);
+}
+
+NeonGatherWorkload::NeonGatherWorkload(const GatherQueueDescriptor& descriptor,
+                                       const WorkloadInfo& info)
+        : BaseWorkload<GatherQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonGatherWorkload", 2, 1);
+
+    arm_compute::ITensor& input   = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& indices = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    int aclAxis = ComputeAclAxis(0, info.m_InputTensorInfos[0]);
+
+    m_Layer.configure(&input, &indices, &output, aclAxis);
+}
+
+void NeonGatherWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonGatherWorkload_Execute");
+    m_Layer.run();
+}
+} //namespace armnn
\ No newline at end of file
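
As a rough usage sketch (the shapes and include paths are illustrative assumptions, with the workloads directory taken to be on the include path), the validate entry point can be exercised on its own before constructing the workload:

#include "NeonGatherWorkload.hpp"   // the header added by this patch
#include <armnn/Tensor.hpp>
#include <arm_compute/core/Error.h>

#include <iostream>

int main()
{
    // Illustrative shapes: gather rows of a {3,4} tensor with two S32 indices -> {2,4},
    // along ArmNN axis 0 (the only axis this workload passes to NEGather).
    armnn::TensorInfo params ({ 3, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo indices({ 2 },    armnn::DataType::Signed32);
    armnn::TensorInfo output ({ 2, 4 }, armnn::DataType::Float32);

    arm_compute::Status status = armnn::NeonGatherWorkloadValidate(params, indices, output);
    bool ok = (status.error_code() == arm_compute::ErrorCode::OK);
    std::cout << (ok ? "NEGather validation passed" : status.error_description()) << std::endl;
    return 0;
}
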
diff --git a/src/backends/neon/workloads/NeonGatherWorkload.hpp b/src/backends/neon/workloads/NeonGatherWorkload.hpp
new file mode 100644 (file)
index 0000000..b1b47a5
--- /dev/null
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEGather.h>
+
+namespace armnn
+{
+arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& indices,
+                                               const TensorInfo& output);
+
+class NeonGatherWorkload : public BaseWorkload<GatherQueueDescriptor>
+{
+public:
+    NeonGatherWorkload(const GatherQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NEGather m_Layer;
+};
+
+} //namespace armnn
\ No newline at end of file
index 2da6ea0..243f5a4 100644 (file)
@@ -26,6 +26,7 @@
 #include "NeonExpWorkload.hpp"
 #include "NeonFloorFloatWorkload.hpp"
 #include "NeonFullyConnectedWorkload.hpp"
+#include "NeonGatherWorkload.hpp"
 #include "NeonInstanceNormalizationWorkload.hpp"
 #include "NeonL2NormalizationFloatWorkload.hpp"
 #include "NeonLstmFloatWorkload.hpp"