IVGCVSW-3695 Add CL ArgMinMax workload
Author: James Conroy <james.conroy@arm.com>
Thu, 19 Sep 2019 16:00:31 +0000 (17:00 +0100)
Committer: James Conroy <james.conroy@arm.com>
Wed, 13 Nov 2019 12:17:48 +0000 (12:17 +0000)
 * Also enabled copy to/from CL for Signed32.

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I0113182891f9767de73f04dcd81252c84c996eda

12 files changed:
src/backends/cl/ClLayerSupport.cpp
src/backends/cl/ClLayerSupport.hpp
src/backends/cl/ClTensorHandle.hpp
src/backends/cl/ClWorkloadFactory.cpp
src/backends/cl/ClWorkloadFactory.hpp
src/backends/cl/backend.mk
src/backends/cl/test/ClLayerTests.cpp
src/backends/cl/workloads/CMakeLists.txt
src/backends/cl/workloads/ClArgMinMaxWorkload.cpp [new file with mode: 0644]
src/backends/cl/workloads/ClArgMinMaxWorkload.hpp [new file with mode: 0644]
src/backends/cl/workloads/ClWorkloads.hpp
src/backends/neon/test/NeonLayerTests.cpp

index 6dcda96..ed570bc 100644 (file)
@@ -19,6 +19,7 @@
 #include "workloads/ClAbsWorkload.hpp"
 #include "workloads/ClAdditionWorkload.hpp"
 #include "workloads/ClActivationWorkload.hpp"
+#include "workloads/ClArgMinMaxWorkload.hpp"
 #include "workloads/ClBatchNormalizationFloatWorkload.hpp"
 #include "workloads/ClBatchToSpaceNdWorkload.hpp"
 #include "workloads/ClConvertFp16ToFp32Workload.hpp"
@@ -178,6 +179,18 @@ bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                    output);
 }
 
+// Reports whether the CL backend can execute an ArgMinMax layer for the given
+// input/output tensors and descriptor, by forwarding to the workload's static
+// validate function. On failure, a reason is written to reasonIfUnsupported.
+bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
+                                          const ArgMinMaxDescriptor& descriptor,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
+}
+
 bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const TensorInfo& mean,
index 26eb42e..219ce3b 100644 (file)
@@ -26,6 +26,11 @@ public:
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsArgMinMaxSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const ArgMinMaxDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsBatchNormalizationSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const TensorInfo& mean,
index f5583c0..2423a8b 100644 (file)
@@ -110,6 +110,10 @@ private:
                 armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                  static_cast<int16_t*>(memory));
                 break;
+            case arm_compute::DataType::S32:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<int32_t*>(memory));
+                break;
             default:
             {
                 throw armnn::UnimplementedException();
@@ -142,6 +146,10 @@ private:
                 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
                                                                  this->GetTensor());
                 break;
+            case arm_compute::DataType::S32:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
+                                                                 this->GetTensor());
+                break;
             default:
             {
                 throw armnn::UnimplementedException();
@@ -222,6 +230,10 @@ private:
                 armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                  static_cast<int16_t*>(memory));
                 break;
+            case arm_compute::DataType::S32:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<int32_t*>(memory));
+                break;
             default:
             {
                 throw armnn::UnimplementedException();
@@ -254,6 +266,10 @@ private:
                 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
                                                                  this->GetTensor());
                 break;
+            case arm_compute::DataType::S32:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int32_t*>(memory),
+                                                                 this->GetTensor());
+                break;
             default:
             {
                 throw armnn::UnimplementedException();
index 04e09f4..2a7c8fe 100644 (file)
@@ -144,6 +144,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueue
     return MakeWorkload<ClAdditionWorkload>(descriptor, info);
 }
 
+// Creates a CL ArgMinMax workload for the given queue descriptor.
+// NOTE(review): sibling factory methods (e.g. CreateAddition above) go through
+// MakeWorkload<>, while this constructs with std::make_unique directly —
+// confirm this bypass of the common factory helper is intentional.
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
+    return std::make_unique<ClArgMinMaxWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchNormalization(
     const BatchNormalizationQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
index 1cae6e1..18d12be 100644 (file)
@@ -47,6 +47,9 @@ public:
     std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const override;
 
index b78bae1..c439186 100644 (file)
@@ -25,6 +25,7 @@ BACKEND_SOURCES := \
         workloads/ClAbsWorkload.cpp \
         workloads/ClActivationWorkload.cpp \
         workloads/ClAdditionWorkload.cpp \
+        workloads/ClArgMinMaxWorkload.cpp \
         workloads/ClBatchNormalizationFloatWorkload.cpp \
         workloads/ClBatchToSpaceNdWorkload.cpp \
         workloads/ClConcatWorkload.cpp \
index 0fc8ece..7d1fb8b 100644 (file)
@@ -872,6 +872,14 @@ ARMNN_AUTO_TEST_CASE(Abs3dFloat16, Abs3dTest<DataType::Float16>)
 
 ARMNN_AUTO_TEST_CASE(AbsZeroFloat16, AbsZeroTest<DataType::Float16>)
 
+// ArgMinMax
+ARMNN_AUTO_TEST_CASE(ArgMinFloat32, ArgMinSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMaxFloat32, ArgMaxSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannel, ArgMinChannelTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
+
 #if defined(ARMNNREF_ENABLED)
 
 // The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
index a9f320d..94c4a3e 100644 (file)
@@ -10,6 +10,8 @@ list(APPEND armnnClBackendWorkloads_sources
     ClActivationWorkload.hpp
     ClAdditionWorkload.cpp
     ClAdditionWorkload.hpp
+    ClArgMinMaxWorkload.cpp
+    ClArgMinMaxWorkload.hpp
     ClBatchNormalizationFloatWorkload.cpp
     ClBatchNormalizationFloatWorkload.hpp
     ClBatchToSpaceNdWorkload.cpp
diff --git a/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp b/src/backends/cl/workloads/ClArgMinMaxWorkload.cpp
new file mode 100644 (file)
index 0000000..3270b0a
--- /dev/null
@@ -0,0 +1,82 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClArgMinMaxWorkload.hpp"
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <TensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+#include <cl/ClLayerSupport.hpp>
+
+namespace
+{
+// Converts an armnn axis index (dimension 0 = outermost) into ACL's reversed
+// dimension ordering (dimension 0 = innermost).
+unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int axisIndex)
+{
+    return (numDimensions - axisIndex) - 1;
+}
+
+} //namespace
+
+namespace armnn
+{
+
+// Asks ACL whether CLArgMinMaxLayer supports the given tensor infos and
+// descriptor, without constructing a workload. Returns an arm_compute::Status
+// carrying the validation result (and an error description on failure).
+arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const ArgMinMaxDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    // Map the armnn axis to ACL's reversed dimension ordering; GetUnsignedAxis
+    // presumably resolves negative axis values first — see armnnUtils.
+    auto numDims = input.GetNumDimensions();
+    auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, descriptor.m_Axis);
+    int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+
+    // ACL expresses ArgMin/ArgMax as index-returning reduction operations.
+    if (descriptor.m_Function == ArgMinMaxFunction::Max)
+    {
+        return arm_compute::CLArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
+                                                       arm_compute::ReductionOperation::ARG_IDX_MAX);
+    }
+    else
+    {
+        return arm_compute::CLArgMinMaxLayer::validate(&aclInput, aclAxis, &aclOutput,
+                                                       arm_compute::ReductionOperation::ARG_IDX_MIN);
+    }
+}
+
+
+// Configures ACL's CLArgMinMaxLayer against the workload's tensors.
+// Assumes exactly one input and one output tensor handle in the descriptor.
+ClArgMinMaxWorkload::ClArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info)
+        : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info)
+{
+    arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
+
+    // Same axis translation as the validate function: normalise the axis via
+    // GetUnsignedAxis, then reverse it into ACL's dimension ordering.
+    auto numDims = info.m_InputTensorInfos[0].GetNumDimensions();
+    auto unsignedAxis = armnnUtils::GetUnsignedAxis(numDims, m_Data.m_Parameters.m_Axis);
+    int aclAxis = boost::numeric_cast<int>(CalcAclAxis(numDims, unsignedAxis));
+
+    // ACL expresses ArgMin/ArgMax as index-returning reduction operations.
+    if (m_Data.m_Parameters.m_Function == ArgMinMaxFunction::Max)
+    {
+        m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MAX);
+    }
+    else
+    {
+        m_ArgMinMaxLayer.configure(&input, aclAxis, &output, arm_compute::ReductionOperation::ARG_IDX_MIN);
+    }
+}
+
+// Runs the previously configured ACL function, wrapped in a CL profiling event.
+void ClArgMinMaxWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClArgMinMaxWorkload_Execute");
+    RunClFunction(m_ArgMinMaxLayer, CHECK_LOCATION());
+}
+
+} //namespace armnn
+
diff --git a/src/backends/cl/workloads/ClArgMinMaxWorkload.hpp b/src/backends/cl/workloads/ClArgMinMaxWorkload.hpp
new file mode 100644 (file)
index 0000000..54f28e6
--- /dev/null
@@ -0,0 +1,30 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClArgMinMaxWorkloadValidate(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const ArgMinMaxDescriptor& descriptor);
+
+// CL backend workload executing an ArgMin or ArgMax reduction through ACL's
+// CLArgMinMaxLayer. Configured once at construction; Execute() runs the layer.
+class ClArgMinMaxWorkload : public BaseWorkload<ArgMinMaxQueueDescriptor>
+{
+public:
+    ClArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    // mutable: Execute() is const, but running the ACL function mutates
+    // the layer's internal state.
+    mutable arm_compute::CLArgMinMaxLayer m_ArgMinMaxLayer;
+};
+
+} //namespace armnn
index cd6ca5f..dd8c699 100644 (file)
@@ -7,6 +7,7 @@
 #include "ClAbsWorkload.hpp"
 #include "ClActivationWorkload.hpp"
 #include "ClAdditionWorkload.hpp"
+#include "ClArgMinMaxWorkload.hpp"
 #include "ClConstantWorkload.hpp"
 #include "ClBatchNormalizationFloatWorkload.hpp"
 #include "ClBatchToSpaceNdWorkload.hpp"
index ef3c837..ea9c813 100644 (file)
@@ -955,6 +955,8 @@ ARMNN_AUTO_TEST_CASE(ArgMinFloat32, ArgMinSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMaxFloat32, ArgMaxSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMinChannel, ArgMinChannelTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
 
 #if defined(ARMNNREF_ENABLED)