MLCE-84 Add Neon Pad support and unit tests
author Éanna Ó Catháin <eanna.ocathain@arm.com>
Fri, 25 Jan 2019 10:01:40 +0000
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Fri, 25 Jan 2019 14:51:53 +0000
Change-Id: I0d949a9f23a61af5013efdd18572b29fae585f2a

src/backends/neon/NeonLayerSupport.cpp
src/backends/neon/NeonLayerSupport.hpp
src/backends/neon/NeonWorkloadFactory.cpp
src/backends/neon/backend.mk
src/backends/neon/test/NeonLayerTests.cpp
src/backends/neon/workloads/CMakeLists.txt
src/backends/neon/workloads/NeonPadWorkload.cpp [new file with mode: 0644]
src/backends/neon/workloads/NeonPadWorkload.hpp [new file with mode: 0644]
src/backends/neon/workloads/NeonWorkloads.hpp

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 9db7354..3b09676 100644
@@ -30,6 +30,7 @@
 #include "workloads/NeonMultiplicationFloatWorkload.hpp"
 #include "workloads/NeonNormalizationFloatWorkload.hpp"
 #include "workloads/NeonFullyConnectedWorkload.hpp"
+#include "workloads/NeonPadWorkload.hpp"
 #include "workloads/NeonPermuteWorkload.hpp"
 #include "workloads/NeonPooling2dWorkload.hpp"
 #include "workloads/NeonResizeBilinearWorkload.hpp"
@@ -356,6 +357,18 @@ bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                          &TrueFunc<>);
 }
 
+bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const PadDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
+}
+
 bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const PermuteDescriptor& descriptor,
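
A minimal sketch (not part of this change) of querying the new check directly through NeonLayerSupport; the <neon/...> include is a backend-internal header rather than a public one, and the shapes and padding values are made up for illustration:

// Builds only inside the Arm NN source tree.
#include <neon/NeonLayerSupport.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>

bool IsPadSupportedOnNeon()
{
    using namespace armnn;

    TensorInfo input({2, 2}, DataType::Float32);
    TensorInfo output({4, 6}, DataType::Float32);   // {2+1+1, 2+2+2} after padding

    PadDescriptor descriptor;
    descriptor.m_PadList = {{1, 1}, {2, 2}};        // (before, after) padding per dimension

    std::string reason;
    NeonLayerSupport layerSupport;
    return layerSupport.IsPadSupported(input, output, descriptor, Optional<std::string&>(reason));
}
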
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 6316324..6de5c78 100644
@@ -112,6 +112,11 @@ public:
     bool IsOutputSupported(const TensorInfo& output,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsPadSupported(const TensorInfo& input,
+                        const TensorInfo& output,
+                        const PadDescriptor& descriptor,
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsPermuteSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const PermuteDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 76e1dd0..5c7eecc 100644
@@ -286,7 +286,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescri
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
-    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+    return std::make_unique<NeonPadWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 1ebeae5..09dca63 100644
@@ -31,6 +31,7 @@ BACKEND_SOURCES := \
         workloads/NeonMinimumWorkload.cpp \
         workloads/NeonMultiplicationFloatWorkload.cpp \
         workloads/NeonNormalizationFloatWorkload.cpp \
+        workloads/NeonPadWorkload.cpp \
         workloads/NeonPermuteWorkload.cpp \
         workloads/NeonPooling2dWorkload.cpp \
         workloads/NeonReshapeWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 6975374..b229cd0 100644
@@ -386,6 +386,11 @@ ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
 ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
 ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
 
+// Pad
+ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
+ARMNN_AUTO_TEST_CASE(PadFloat323d, PadFloat323dTest)
+ARMNN_AUTO_TEST_CASE(PadFloat324d, PadFloat324dTest)
+
 // Permute
 ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test)
 ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 713418d..1a1ed74 100644
@@ -40,6 +40,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonMultiplicationFloatWorkload.hpp
     NeonNormalizationFloatWorkload.cpp
     NeonNormalizationFloatWorkload.hpp
+    NeonPadWorkload.cpp
+    NeonPadWorkload.hpp
     NeonPermuteWorkload.cpp
     NeonPermuteWorkload.hpp
     NeonPooling2dWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonPadWorkload.cpp b/src/backends/neon/workloads/NeonPadWorkload.cpp
new file mode 100644
index 0000000..60d6b8a
--- /dev/null
@@ -0,0 +1,57 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonPadWorkload.hpp"
+
+#include <neon/NeonTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <arm_compute/core/Types.h>
+#include <arm_compute/runtime/NEON/functions/NEPadLayer.h>
+
+#include "NeonWorkloadUtils.hpp"
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+NeonPadWorkload::NeonPadWorkload(const PadQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : BaseWorkload<PadQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonPadWorkload", 1, 1);
+
+    arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    std::vector<std::pair<unsigned int, unsigned int>> reversed_PadList(descriptor.m_Parameters.m_PadList.size());
+
+    std::reverse_copy(std::begin(descriptor.m_Parameters.m_PadList),
+                      std::end(descriptor.m_Parameters.m_PadList),
+                      std::begin(reversed_PadList));
+
+    arm_compute::PaddingList padList = static_cast<arm_compute::PaddingList>(reversed_PadList);
+
+    auto layer = std::make_unique<arm_compute::NEPadLayer>();
+    layer->configure(&input, &output, padList);
+    m_Layer.reset(layer.release());
+}
+
+void NeonPadWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonPadWorkload_Execute");
+    m_Layer->run();
+}
+
+arm_compute::Status NeonPadWorkloadValidate(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const PadDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+    arm_compute::PaddingList padList = static_cast<arm_compute::PaddingList>(descriptor.m_PadList);
+
+    return arm_compute::NEPadLayer::validate(&aclInputInfo, &aclOutputInfo, padList);
+}
+
+} // namespace armnn
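
The reverse_copy in the constructor above converts between dimension orderings: Arm NN's PadDescriptor lists the (before, after) padding starting from the outermost dimension, while the Compute Library's PaddingList treats index 0 as the innermost dimension. A standalone sketch of that conversion, using plain std types so it builds without the Compute Library:

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

int main()
{
    using PadList = std::vector<std::pair<unsigned int, unsigned int>>;

    // Arm NN order: outermost dimension first.
    PadList armnnPadList = {{0, 0}, {1, 1}, {2, 2}};

    // Compute Library order: innermost dimension first.
    PadList aclPadList(armnnPadList.size());
    std::reverse_copy(armnnPadList.begin(), armnnPadList.end(), aclPadList.begin());

    for (const auto& p : aclPadList)
    {
        std::printf("(%u, %u)\n", p.first, p.second);   // prints (2, 2) (1, 1) (0, 0)
    }
    return 0;
}
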
diff --git a/src/backends/neon/workloads/NeonPadWorkload.hpp b/src/backends/neon/workloads/NeonPadWorkload.hpp
new file mode 100644
index 0000000..ab0e821
--- /dev/null
@@ -0,0 +1,30 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/Workload.hpp>
+#include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/core/Error.h>
+
+namespace armnn {
+
+class NeonPadWorkload : public BaseWorkload<PadQueueDescriptor>
+{
+public:
+    NeonPadWorkload(const PadQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    void Execute() const override;
+
+private:
+    std::unique_ptr<arm_compute::IFunction> m_Layer;
+};
+
+arm_compute::Status NeonPadWorkloadValidate(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const PadDescriptor& descriptor);
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index e034cc9..e4f4fcf 100644
@@ -22,6 +22,7 @@
 #include "NeonMinimumWorkload.hpp"
 #include "NeonMultiplicationFloatWorkload.hpp"
 #include "NeonNormalizationFloatWorkload.hpp"
+#include "NeonPadWorkload.hpp"
 #include "NeonPermuteWorkload.hpp"
 #include "NeonPooling2dWorkload.hpp"
 #include "NeonReshapeWorkload.hpp"