MLCE-156: Add Division workload in CpuAcc
author Pablo Tello <pablo.tello@arm.com>
Thu, 23 Jan 2020 10:37:17 +0000 (10:37 +0000)
committer Pablo Tello <pablo.tello@arm.com>
Fri, 24 Jan 2020 14:50:49 +0000 (14:50 +0000)
Change-Id: I1f228fcaf1077867d9755a2b850c6703387fab34
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
src/backends/neon/NeonLayerSupport.cpp
src/backends/neon/NeonLayerSupport.hpp
src/backends/neon/NeonWorkloadFactory.cpp
src/backends/neon/backend.mk
src/backends/neon/test/NeonCreateWorkloadTests.cpp
src/backends/neon/test/NeonLayerTests.cpp
src/backends/neon/workloads/CMakeLists.txt
src/backends/neon/workloads/NeonDivisionWorkload.cpp [new file with mode: 0644]
src/backends/neon/workloads/NeonDivisionWorkload.hpp [new file with mode: 0644]
src/backends/neon/workloads/NeonWorkloads.hpp
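
For orientation, a minimal sketch of what this change enables end to end: a network containing a Division layer can now be optimized for and run on the CpuAcc backend rather than falling back to CpuRef. This is an illustration against the ArmNN public API of this release, not code from the patch; shapes and variable names are arbitrary.

    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        IRuntime::CreationOptions options;
        IRuntimePtr runtime = IRuntime::Create(options);

        INetworkPtr net = INetwork::Create();
        IConnectableLayer* in0 = net->AddInputLayer(0);
        IConnectableLayer* in1 = net->AddInputLayer(1);
        IConnectableLayer* div = net->AddDivisionLayer("div");
        IConnectableLayer* out = net->AddOutputLayer(0);

        const TensorInfo info({ 2, 2 }, DataType::Float32);
        in0->GetOutputSlot(0).SetTensorInfo(info);
        in1->GetOutputSlot(0).SetTensorInfo(info);
        div->GetOutputSlot(0).SetTensorInfo(info);

        in0->GetOutputSlot(0).Connect(div->GetInputSlot(0));
        in1->GetOutputSlot(0).Connect(div->GetInputSlot(1));
        div->GetOutputSlot(0).Connect(out->GetInputSlot(0));

        // With this patch the optimizer can assign the Division layer to CpuAcc.
        IOptimizedNetworkPtr optNet = Optimize(*net, { Compute::CpuAcc },
                                               runtime->GetDeviceSpec());
        NetworkId netId;
        runtime->LoadNetwork(netId, std::move(optNet));
        return 0;
    }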

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 47bcc2e79513ed576c68c194749aa0e660cdcf7e..6ca69f48416e82ff5b3f3e060e5b82d6cec901fa 100644 (file)
@@ -38,6 +38,7 @@
 #include "workloads/NeonConcatWorkload.hpp"
 #include "workloads/NeonMinimumWorkload.hpp"
 #include "workloads/NeonMultiplicationWorkload.hpp"
+#include "workloads/NeonDivisionWorkload.hpp"
 #include "workloads/NeonNormalizationFloatWorkload.hpp"
 #include "workloads/NeonFullyConnectedWorkload.hpp"
 #include "workloads/NeonPadWorkload.hpp"
@@ -554,6 +555,18 @@ bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                    output);
 }
 
+bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+                                           const TensorInfo& input1,
+                                           const TensorInfo& output,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
+}
+
 bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const NormalizationDescriptor& descriptor,
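
For reference, a sketch of exercising the new check directly; in practice it is reached through the backend's ILayerSupport interface. This is an illustration, not code from the patch, and the include path and shapes are assumptions.

    #include <neon/NeonLayerSupport.hpp>
    #include <string>

    bool DivisionSupportedOnNeon()
    {
        armnn::NeonLayerSupport layerSupport;
        const armnn::TensorInfo in0({ 1, 4 }, armnn::DataType::Float32);
        const armnn::TensorInfo in1({ 1, 4 }, armnn::DataType::Float32);
        const armnn::TensorInfo out({ 1, 4 }, armnn::DataType::Float32);

        std::string reason;
        // FORWARD_WORKLOAD_VALIDATE_FUNC converts the arm_compute::Status from
        // NeonDivisionWorkloadValidate into a bool, writing any failure
        // message into 'reason'.
        return layerSupport.IsDivisionSupported(in0, in1, out,
                                                armnn::Optional<std::string&>(reason));
    }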
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index e49e78b3d2490c832b8715fba6c19e9aaca523b7..f1d87f65f36c7dca634afae3722ab0c145e798cb 100644 (file)
@@ -176,6 +176,11 @@ public:
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsDivisionSupported(const TensorInfo& input0,
+                             const TensorInfo& input1,
+                             const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsNormalizationSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const NormalizationDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 649cb9f6994c0b61536c4bddfe9bdea7f0e12e51..cb2e88eddb0732372f338c575408f02cbb819eac 100644 (file)
@@ -216,10 +216,10 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDetectionPostProcess(
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
     const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+    return std::make_unique<NeonDivisionWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor&
                                                                        descriptor,
                                                                        const WorkloadInfo& info) const
 {
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 05dcd02c9a761964ecbfa3ae15d3d93cfd462746..740cbcd1ff9d76f15e5721a8a981864403e0be7a 100644 (file)
@@ -45,6 +45,7 @@ BACKEND_SOURCES := \
         workloads/NeonMeanWorkload.cpp \
         workloads/NeonMinimumWorkload.cpp \
         workloads/NeonMultiplicationWorkload.cpp \
+        workloads/NeonDivisionWorkload.cpp \
         workloads/NeonNormalizationFloatWorkload.cpp \
         workloads/NeonPadWorkload.cpp \
         workloads/NeonPermuteWorkload.cpp \
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 400a5a38e21fdfc0f41d3e6b0a135fc7c35a192b..3e1888cb547a25f34a6bbe6993f5411fb29667a5 100644 (file)
@@ -181,6 +181,14 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
                                       DataType::QAsymmU8>();
 }
 
+BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+{
+    NeonCreateElementwiseWorkloadTest<NeonDivisionWorkload,
+                                      DivisionQueueDescriptor,
+                                      DivisionLayer,
+                                      armnn::DataType::Float32>();
+}
+
 template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
 static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
 {
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index f14b2a446bab8b85ddad88a3809a64b182c73a1c..1b25cad9933f79489fdc5065434a1333e9e1dffc 100644 (file)
@@ -491,6 +491,12 @@ ARMNN_AUTO_TEST_CASE(SubtractionUint8, SubtractionUint8Test)
 ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test)
 ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test)
 
+// Div
+ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest)
+ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest)
+ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest)
+
 // Mul
 ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
 ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
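
A note on the DivisionByZeroTest case registered above: NEON float arithmetic is IEEE 754 compliant, so dividing by zero must yield signed infinities or NaN rather than trapping, which is what that case verifies. A minimal sketch of the expected semantics (an illustration, not the actual test body):

    #include <cassert>
    #include <cmath>

    int main()
    {
        volatile float zero = 0.0f; // volatile keeps the compiler from folding the divisions
        assert(std::isinf( 1.0f / zero)); // +inf
        assert(std::isinf(-1.0f / zero)); // -inf
        assert(std::isnan( 0.0f / zero)); // NaN
        return 0;
    }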
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index d2c549ee25494087493c8423d282cb545a61811f..46b5332235062f90cc27ece28ae1755ab5a5909d 100644 (file)
@@ -52,6 +52,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonMinimumWorkload.hpp
     NeonMultiplicationWorkload.cpp
     NeonMultiplicationWorkload.hpp
+    NeonDivisionWorkload.cpp
+    NeonDivisionWorkload.hpp
     NeonNormalizationFloatWorkload.cpp
     NeonNormalizationFloatWorkload.hpp
     NeonPadWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.cpp b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
new file mode 100644 (file)
index 0000000..6fdb455
--- /dev/null
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonDivisionWorkload.hpp"
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo& input0,
+                                                 const TensorInfo& input1,
+                                                 const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
+    const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEElementwiseDivision::validate(&aclInput0,
+                                                         &aclInput1,
+                                                         &aclOutput);
+}
+
+NeonDivisionWorkload::NeonDivisionWorkload(const DivisionQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info)
+    : BaseWorkload<DivisionQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonDivisionWorkload", 2, 1);
+
+    arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_DivLayer.configure(&input0, &input1, &output);
+}
+
+void NeonDivisionWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDivisionWorkload_Execute");
+    m_DivLayer.run();
+}
+
+} //namespace armnn
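
The workload above is a thin wrapper over arm_compute's validate/configure/run lifecycle. A standalone sketch of that lifecycle, assuming the ACL version bundled with this release (shapes illustrative):

    #include <arm_compute/runtime/NEON/functions/NEElementwiseOperations.h>
    #include <arm_compute/runtime/Tensor.h>

    using namespace arm_compute;

    int main()
    {
        Tensor a, b, c;
        const TensorInfo info(TensorShape(2U, 2U), 1, DataType::F32);
        a.allocator()->init(info);
        b.allocator()->init(info);
        c.allocator()->init(info);

        // Mirrors NeonDivisionWorkloadValidate: check support before configuring.
        Status status = NEElementwiseDivision::validate(a.info(), b.info(), c.info());
        if (status.error_code() != ErrorCode::OK)
        {
            return 1;
        }

        NEElementwiseDivision div;
        div.configure(&a, &b, &c); // what the workload constructor does

        a.allocator()->allocate();
        b.allocator()->allocate();
        c.allocator()->allocate();
        // ... fill a and b with input data ...

        div.run();                 // what NeonDivisionWorkload::Execute() does
        return 0;
    }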
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.hpp b/src/backends/neon/workloads/NeonDivisionWorkload.hpp
new file mode 100644 (file)
index 0000000..2405d9a
--- /dev/null
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEElementwiseOperations.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo& input0,
+                                                 const TensorInfo& input1,
+                                                 const TensorInfo& output);
+
+class NeonDivisionWorkload : public BaseWorkload<DivisionQueueDescriptor>
+{
+public:
+    NeonDivisionWorkload(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NEElementwiseDivision m_DivLayer;
+};
+
+} //namespace armnn
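
One design note on this header: Execute() is a const member (IWorkload declares it const), while arm_compute::IFunction::run() is non-const, which is why m_DivLayer must be mutable. A stripped-down illustration of the pattern (hypothetical class, not part of this patch):

    #include <arm_compute/runtime/NEON/functions/NEElementwiseOperations.h>

    class ConstExecutor
    {
    public:
        void Execute() const { m_Fn.run(); } // legal only because m_Fn is mutable
    private:
        mutable arm_compute::NEElementwiseDivision m_Fn;
    };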
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 104504e0971f4c09f4bbd3002949f96a45b9b720..39cf044b6ce1f15cff544b11f0f104dc780d0bd1 100644 (file)
@@ -7,6 +7,7 @@
 #include "NeonAbsWorkload.hpp"
 #include "NeonActivationWorkload.hpp"
 #include "NeonAdditionWorkload.hpp"
+#include "NeonDivisionWorkload.hpp"
 #include "NeonArgMinMaxWorkload.hpp"
 #include "NeonBatchNormalizationWorkload.hpp"
 #include "NeonConstantWorkload.hpp"