From: Nattapat Chaimanowong
Date: Thu, 20 Dec 2018 14:14:06 +0000 (+0000)
Subject: IVGCVSW-2212 Add Neon support for Maximum operator
X-Git-Tag: submit/tizen/20200316.035456~984
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4e6597a26059bfa31ea24e190e2afe0558cea4c8;p=platform%2Fupstream%2Farmnn.git

IVGCVSW-2212 Add Neon support for Maximum operator

Change-Id: Iddae3486641d2d195827ae4e0a9dfa3f7ccd65e3
---

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 869fd03..724455d 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -23,6 +23,7 @@
 #include "workloads/NeonConvolution2dWorkload.hpp"
 #include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
 #include "workloads/NeonL2NormalizationFloatWorkload.hpp"
+#include "workloads/NeonMaximumWorkload.hpp"
 #include "workloads/NeonMergerWorkload.hpp"
 #include "workloads/NeonMultiplicationFloatWorkload.hpp"
 #include "workloads/NeonNormalizationFloatWorkload.hpp"
@@ -346,6 +347,18 @@ bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
     return false;
 }
 
+bool NeonLayerSupport::IsMaximumSupported(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
+}
+
 bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const MeanDescriptor& descriptor,

diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 43d0bd9..2871c79 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -121,6 +121,11 @@ public:
                          const TensorInfo* cellToOutputWeights,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsMaximumSupported(const TensorInfo& input0,
+                            const TensorInfo& input1,
+                            const TensorInfo& output,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsMeanSupported(const TensorInfo& input,
                          const TensorInfo& output,
                          const MeanDescriptor& descriptor,

diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index f7b2133..848a6f3 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -267,7 +267,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
 {
-    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+    return std::make_unique<NeonMaximumWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,

diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 88fb643..a3058ad 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -25,6 +25,7 @@ BACKEND_SOURCES := \
         workloads/NeonFullyConnectedWorkload.cpp \
         workloads/NeonL2NormalizationFloatWorkload.cpp \
         workloads/NeonLstmFloatWorkload.cpp \
+        workloads/NeonMaximumWorkload.cpp \
         workloads/NeonMergerWorkload.cpp \
         workloads/NeonMultiplicationFloatWorkload.cpp \
         workloads/NeonNormalizationFloatWorkload.cpp \

diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 72ae372..37933e0 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -399,6 +399,14 @@ ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
 ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
                      LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
 
+// Max
+ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
+ARMNN_AUTO_TEST_CASE(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVector, MaximumBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_CASE(MaximumUint8, MaximumUint8Test)
+ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementUint8, MaximumBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorUint8, MaximumBroadcast1DVectorUint8Test)
+
 // Normalization
 ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
 ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest)

diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 2c2c9f0..9961397 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -28,6 +28,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonL2NormalizationFloatWorkload.hpp
     NeonLstmFloatWorkload.cpp
     NeonLstmFloatWorkload.hpp
+    NeonMaximumWorkload.cpp
+    NeonMaximumWorkload.hpp
     NeonMergerWorkload.cpp
     NeonMergerWorkload.hpp
     NeonMultiplicationFloatWorkload.cpp

diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.cpp b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
new file mode 100644
index 0000000..9994af9
--- /dev/null
+++ b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonMaximumWorkload.hpp"
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
+    const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEElementwiseMax::validate(&aclInput0,
+                                                   &aclInput1,
+                                                   &aclOutput);
+}
+
+NeonMaximumWorkload::NeonMaximumWorkload(const MaximumQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info)
+    : BaseWorkload<MaximumQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonMaximumWorkload", 2, 1);
+
+    arm_compute::ITensor& input0 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_MaxLayer.configure(&input0, &input1, &output);
+}
+
+void NeonMaximumWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMaximumWorkload_Execute");
+    m_MaxLayer.run();
+}
+
+} //namespace armnn

diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.hpp b/src/backends/neon/workloads/NeonMaximumWorkload.hpp
new file mode 100644
index 0000000..59e2d61
--- /dev/null
+++ b/src/backends/neon/workloads/NeonMaximumWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEElementwiseOperations.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output);
+
+class NeonMaximumWorkload : public BaseWorkload<MaximumQueueDescriptor>
+{
+public:
+    NeonMaximumWorkload(const MaximumQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NEElementwiseMax m_MaxLayer;
+};
+
+} //namespace armnn

diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 48bd137..82142f2 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -16,6 +16,7 @@
 #include "NeonFullyConnectedWorkload.hpp"
 #include "NeonL2NormalizationFloatWorkload.hpp"
 #include "NeonLstmFloatWorkload.hpp"
+#include "NeonMaximumWorkload.hpp"
 #include "NeonMergerWorkload.hpp"
 #include "NeonMultiplicationFloatWorkload.hpp"
 #include "NeonNormalizationFloatWorkload.hpp"
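
---

Usage note (illustrative, not part of the upstream patch): the sketch below shows how the pieces added here are exercised end to end through ArmNN's public C++ API of this period. The tensor shape, layer names, and runtime setup are assumptions made for the example; AddMaximumLayer, Compute::CpuAcc, Optimize, and LoadNetwork are the public entry points involved.

// Hypothetical client code, not taken from the patch: builds a two-input
// Maximum network and prefers the Neon (CpuAcc) backend. Requesting CpuAcc
// makes the optimizer consult NeonLayerSupport::IsMaximumSupported; loading
// the optimized network then calls NeonWorkloadFactory::CreateMaximum, which
// after this change returns a real NeonMaximumWorkload instead of a stub.
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    // Describe a tiny elementwise-max network: two inputs -> Maximum -> output.
    INetworkPtr net = INetwork::Create();
    TensorInfo info({ 2, 2 }, DataType::Float32); // shape chosen arbitrarily

    IConnectableLayer* in0 = net->AddInputLayer(0);
    IConnectableLayer* in1 = net->AddInputLayer(1);
    IConnectableLayer* max = net->AddMaximumLayer("max");
    IConnectableLayer* out = net->AddOutputLayer(0);

    in0->GetOutputSlot(0).Connect(max->GetInputSlot(0));
    in1->GetOutputSlot(0).Connect(max->GetInputSlot(1));
    max->GetOutputSlot(0).Connect(out->GetInputSlot(0));

    in0->GetOutputSlot(0).SetTensorInfo(info);
    in1->GetOutputSlot(0).SetTensorInfo(info);
    max->GetOutputSlot(0).SetTensorInfo(info);

    // Optimize for the Neon backend and load the network into the runtime.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    IOptimizedNetworkPtr optNet = Optimize(*net, { Compute::CpuAcc },
                                           runtime->GetDeviceSpec());

    NetworkId netId = 0;
    runtime->LoadNetwork(netId, std::move(optNet));
    // Inference would proceed via runtime->EnqueueWorkload(netId, ...).
    return 0;
}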