From f540eb8111ce5d241111da487be7d817661e29b4 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Fri, 10 Apr 2020 19:24:55 +0100
Subject: [PATCH] IVGCVSW-3846 Add NEON GATHER Workload

Signed-off-by: Teresa Charlin
Change-Id: I1a66fdad63cef16866d9dfcb8a339647f856e1d4
---
 src/backends/aclCommon/ArmComputeUtils.hpp         | 15 +++++++
 src/backends/neon/NeonLayerSupport.cpp             | 13 ++++++
 src/backends/neon/NeonLayerSupport.hpp             |  5 +++
 src/backends/neon/NeonWorkloadFactory.cpp          |  2 +-
 src/backends/neon/backend.mk                       |  1 +
 src/backends/neon/test/NeonLayerTests.cpp          |  6 +++
 src/backends/neon/workloads/CMakeLists.txt         |  2 +
 .../neon/workloads/NeonConstantWorkload.cpp        | 10 ++++-
 src/backends/neon/workloads/NeonGatherWorkload.cpp | 46 ++++++++++++++++++++++
 src/backends/neon/workloads/NeonGatherWorkload.hpp | 28 +++++++++++++
 src/backends/neon/workloads/NeonWorkloads.hpp      |  1 +
 11 files changed, 126 insertions(+), 3 deletions(-)
 create mode 100644 src/backends/neon/workloads/NeonGatherWorkload.cpp
 create mode 100644 src/backends/neon/workloads/NeonGatherWorkload.hpp

diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index d8818ce..0ee13b3 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -186,4 +186,19 @@ inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor&
     return splitAxis;
 }
 
+/// Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-dim, dim)
+inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
+{
+    int dim = static_cast<int>(tensor.GetNumDimensions());
+
+    ARMNN_ASSERT(dim != 0);
+    ARMNN_ASSERT((-1 * dim) <= armnnAxis);
+    ARMNN_ASSERT(armnnAxis < dim);
+
+    int sign = (armnnAxis < 0) ? -1 : 1;
+    int aclAxis = sign * dim - 1 - armnnAxis;
+
+    return aclAxis;
+}
+
 } // namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 53d0f0b..a514c8f 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -44,6 +44,7 @@
 #include "workloads/NeonNegWorkload.hpp"
 #include "workloads/NeonNormalizationFloatWorkload.hpp"
 #include "workloads/NeonFullyConnectedWorkload.hpp"
+#include "workloads/NeonGatherWorkload.hpp"
 #include "workloads/NeonPadWorkload.hpp"
 #include "workloads/NeonPermuteWorkload.hpp"
 #include "workloads/NeonPooling2dWorkload.hpp"
@@ -435,6 +436,18 @@ bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                    descriptor);
 }
 
+bool NeonLayerSupport::IsGatherSupported(const TensorInfo& input0,
+                                         const TensorInfo& input1,
+                                         const TensorInfo& output,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
+}
+
 bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
                                           const armnn::TensorInfo& input1,
                                           const armnn::TensorInfo& output,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index adb1891..2581782 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -123,6 +123,11 @@ public:
                                    const FullyConnectedDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsGatherSupported(const TensorInfo& input0,
+                           const TensorInfo& input1,
+                           const TensorInfo& output,
+                           Optional<std::string&> reasonIfUnsupported) const override;
+
     ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
     bool IsGreaterSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index ee0e703..35082f7 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -280,7 +280,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFullyConnected(
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
                                                              const armnn::WorkloadInfo& info) const
 {
-    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+    return std::make_unique<NeonGatherWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 225687f..9e9c38c 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -42,6 +42,7 @@ BACKEND_SOURCES := \
         workloads/NeonExpWorkload.cpp \
         workloads/NeonFloorFloatWorkload.cpp \
         workloads/NeonFullyConnectedWorkload.cpp \
+        workloads/NeonGatherWorkload.cpp \
         workloads/NeonInstanceNormalizationWorkload.cpp \
         workloads/NeonL2NormalizationFloatWorkload.cpp \
         workloads/NeonLstmFloatWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 0e0558b..1ac2c61 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -673,6 +673,12 @@ ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefault
 // Floor
 ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
 
+// Gather
+ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE(Gather1dParamsUint8, Gather1dParamsUint8Test)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
+
 // Equal
 ARMNN_AUTO_TEST_CASE(EqualSimple, EqualSimpleTest)
 ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 6b3fe67..685d75b 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -46,6 +46,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonFloorFloatWorkload.hpp
     NeonFullyConnectedWorkload.cpp
    NeonFullyConnectedWorkload.hpp
+    NeonGatherWorkload.cpp
+    NeonGatherWorkload.hpp
     NeonInstanceNormalizationWorkload.cpp
     NeonInstanceNormalizationWorkload.hpp
     NeonL2NormalizationFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index f7c8a73..05fdcf2 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -23,7 +23,7 @@ arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
 {
     const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    std::array<arm_compute::DataType, 8> supportedTypes = {
+    std::array<arm_compute::DataType, 9> supportedTypes = {
             arm_compute::DataType::BFLOAT16,
             arm_compute::DataType::F16,
             arm_compute::DataType::F32,
@@ -31,7 +31,8 @@ arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
             arm_compute::DataType::QASYMM8_SIGNED,
             arm_compute::DataType::QSYMM16,
             arm_compute::DataType::QSYMM8,
-            arm_compute::DataType::QSYMM8_PER_CHANNEL
+            arm_compute::DataType::QSYMM8_PER_CHANNEL,
+            arm_compute::DataType::S32
     };
     auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());
 
@@ -110,6 +111,11 @@ void NeonConstantWorkload::Execute() const
             CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor(), output);
             break;
         }
+        case arm_compute::DataType::S32:
+        {
+            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int32_t>(), output);
+            break;
+        }
         default:
         {
             ARMNN_ASSERT_MSG(false, "Unknown data type");
diff --git a/src/backends/neon/workloads/NeonGatherWorkload.cpp b/src/backends/neon/workloads/NeonGatherWorkload.cpp
new file mode 100644
index 0000000..2e7c741
--- /dev/null
+++ b/src/backends/neon/workloads/NeonGatherWorkload.cpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonGatherWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& indices,
+                                               const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput   = BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclIndices = BuildArmComputeTensorInfo(indices);
+    const arm_compute::TensorInfo aclOutput  = BuildArmComputeTensorInfo(output);
+
+    int aclAxis = ComputeAclAxis(0, input);
+
+    return arm_compute::NEGather::validate(&aclInput, &aclIndices, &aclOutput, aclAxis);
+}
+
+NeonGatherWorkload::NeonGatherWorkload(const GatherQueueDescriptor& descriptor,
+                                       const WorkloadInfo& info)
+    : BaseWorkload<GatherQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonGatherWorkload", 1, 1);
+
+    arm_compute::ITensor& input   = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& indices = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    int aclAxis = ComputeAclAxis(0, info.m_InputTensorInfos[0]);
+
+    m_Layer.configure(&input, &indices, &output, aclAxis);
+}
+
+void NeonGatherWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonGatherWorkload_Execute");
+    m_Layer.run();
+}
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonGatherWorkload.hpp b/src/backends/neon/workloads/NeonGatherWorkload.hpp
new file mode 100644
index 0000000..b1b47a5
--- /dev/null
+++ b/src/backends/neon/workloads/NeonGatherWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEGather.h>
+
+namespace armnn
+{
+arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& indices,
+                                               const TensorInfo& output);
+
+class NeonGatherWorkload : public BaseWorkload<GatherQueueDescriptor>
+{
+public:
+    NeonGatherWorkload(const GatherQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NEGather m_Layer;
+};
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 2da6ea0..243f5a4 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -26,6 +26,7 @@
 #include "NeonExpWorkload.hpp"
 #include "NeonFloorFloatWorkload.hpp"
 #include "NeonFullyConnectedWorkload.hpp"
+#include "NeonGatherWorkload.hpp"
 #include "NeonInstanceNormalizationWorkload.hpp"
 #include "NeonL2NormalizationFloatWorkload.hpp"
 #include "NeonLstmFloatWorkload.hpp"
-- 
2.7.4
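
Note on the axis conversion used in this patch: ComputeAclAxis() maps an ArmNN axis
(dimensions counted left to right) onto the axis numbering Compute Library expects
(right to left), which is why the Gather workload passes ComputeAclAxis(0, input)
rather than 0 directly. The snippet below is a minimal standalone sketch of that
mapping, not part of the patch; the name ComputeAclAxisSketch and the plain 'dim'
parameter (standing in for tensor.GetNumDimensions()) are illustrative only.

    #include <cassert>

    // Same formula as the ComputeAclAxis() helper added to ArmComputeUtils.hpp,
    // reproduced standalone with the TensorInfo argument replaced by a rank.
    int ComputeAclAxisSketch(int armnnAxis, int dim)
    {
        assert(dim != 0 && -dim <= armnnAxis && armnnAxis < dim);
        int sign = (armnnAxis < 0) ? -1 : 1;
        return sign * dim - 1 - armnnAxis;
    }

    int main()
    {
        assert(ComputeAclAxisSketch(0, 4) == 3);   // outermost ArmNN axis -> last ACL axis
        assert(ComputeAclAxisSketch(3, 4) == 0);   // innermost ArmNN axis -> ACL axis 0
        assert(ComputeAclAxisSketch(-1, 4) == -4); // negative axes stay in the negative ACL range
        return 0;
    }

Since the Gather workload always passes ArmNN axis 0, a rank-N params tensor ends up
gathering along ACL axis N-1.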