IVGCVSW-1048 RESIZE_BILINEAR NEON operator
author Sadik Armagan <sadik.armagan@arm.com>
Mon, 17 Dec 2018 11:32:16 +0000 (11:32 +0000)
committer Matthew Bentham <matthew.bentham@arm.com>
Wed, 23 Jan 2019 15:27:22 +0000 (15:27 +0000)
 * Implemented NeonResizeBilinearWorkload
 * Enable ResizeBilinear Operator unit tests for Neon

!android-nn-driver:405

Change-Id: Iec3100ccaf7d246e8eaf683d1f3ec9191df5241e

19 files changed:
include/armnn/ILayerSupport.hpp
include/armnn/LayerSupport.hpp
src/armnn/LayerSupport.cpp
src/backends/backendsCommon/LayerSupportBase.cpp
src/backends/backendsCommon/LayerSupportBase.hpp
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/cl/ClLayerSupport.cpp
src/backends/cl/ClLayerSupport.hpp
src/backends/neon/NeonLayerSupport.cpp
src/backends/neon/NeonLayerSupport.hpp
src/backends/neon/NeonWorkloadFactory.cpp
src/backends/neon/backend.mk
src/backends/neon/test/NeonLayerTests.cpp
src/backends/neon/workloads/CMakeLists.txt
src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp [new file with mode: 0644]
src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp [new file with mode: 0644]
src/backends/neon/workloads/NeonWorkloads.hpp
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefLayerSupport.hpp

index d9e3d0a..a603229 100644 (file)
@@ -213,6 +213,7 @@ public:
                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
     virtual bool IsResizeBilinearSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
     virtual bool IsRsqrtSupported(const TensorInfo& input,
index 3cf53dd..b95b961 100644 (file)
@@ -275,6 +275,7 @@ bool IsReshapeSupported(const BackendId& backend,
 /// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsResizeBilinearSupported(const BackendId& backend,
                                const TensorInfo& input,
+                               const TensorInfo& output,
                                char* reasonIfUnsupported = nullptr,
                                size_t reasonIfUnsupportedMaxLength = 1024);
 
index b600e4d..2eaf780 100644 (file)
@@ -431,10 +431,11 @@ bool IsReshapeSupported(const BackendId& backend,
 
 bool IsResizeBilinearSupported(const BackendId& backend,
                                const TensorInfo& input,
+                               const TensorInfo& output,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input, output);
 }
 
 bool IsRsqrtSupported(const BackendId& backend,
index 9d68d35..75790dc 100644 (file)
@@ -316,6 +316,7 @@ bool LayerSupportBase::IsReshapeSupported(const TensorInfo& input,
 }
 
 bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
index 5a71b44..c3acdca 100644 (file)
@@ -199,6 +199,7 @@ public:
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsResizeBilinearSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsRsqrtSupported(const TensorInfo& input,
index a70ec7e..38a2402 100644 (file)
@@ -620,7 +620,10 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         case LayerType::ResizeBilinear:
         {
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
-            result = layerSupportObject->IsResizeBilinearSupported(OverrideDataType(input, dataType), reason);
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsResizeBilinearSupported(OverrideDataType(input, dataType),
+                                                                   OverrideDataType(output, dataType),
+                                                                   reason);
             break;
         }
         case LayerType::Rsqrt:
index c1139e2..cb03e8b 100644 (file)
@@ -498,8 +498,10 @@ bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
 }
 
 bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
                                                Optional<std::string&> reasonIfUnsupported) const
 {
+    ignore_unused(output);
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     input.GetDataType(),
                                     &TrueFunc<>,
index b06e6a9..07e2433 100644 (file)
@@ -169,6 +169,7 @@ public:
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsResizeBilinearSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsSoftmaxSupported(const TensorInfo& input,
index 1f205ed..76cdf14 100644 (file)
@@ -32,6 +32,7 @@
 #include "workloads/NeonFullyConnectedWorkload.hpp"
 #include "workloads/NeonPermuteWorkload.hpp"
 #include "workloads/NeonPooling2dWorkload.hpp"
+#include "workloads/NeonResizeBilinearWorkload.hpp"
 #include "workloads/NeonSoftmaxBaseWorkload.hpp"
 #include "workloads/NeonSubtractionFloatWorkload.hpp"
 #endif
@@ -374,6 +375,16 @@ bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                       &TrueFunc<>);
 }
 
+bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output);
+}
+
 bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const SoftmaxDescriptor& descriptor,
index c522c6e..6316324 100644 (file)
@@ -126,6 +126,10 @@ public:
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsResizeBilinearSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsSoftmaxSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
index 311479a..e7fac97 100644 (file)
@@ -203,7 +203,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
     const ResizeBilinearQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return nullptr;
+    return std::make_unique<NeonResizeBilinearWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
index 2e3848c..1ebeae5 100644 (file)
@@ -34,6 +34,7 @@ BACKEND_SOURCES := \
         workloads/NeonPermuteWorkload.cpp \
         workloads/NeonPooling2dWorkload.cpp \
         workloads/NeonReshapeWorkload.cpp \
+        workloads/NeonResizeBilinearWorkload.cpp \
         workloads/NeonSoftmaxBaseWorkload.cpp \
         workloads/NeonSoftmaxFloatWorkload.cpp \
         workloads/NeonSoftmaxUint8Workload.cpp \
index 3d34934..6975374 100644 (file)
@@ -434,6 +434,20 @@ ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
 ARMNN_AUTO_TEST_CASE(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
 ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
 
+// Resize Bilinear - NCHW data layout
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
+
+// Resize Bilinear - NHWC data layout
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
+
 // ============================================================================
 // COMPARE tests
 
index 919c716..713418d 100644 (file)
@@ -46,6 +46,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonPooling2dWorkload.hpp
     NeonReshapeWorkload.cpp
     NeonReshapeWorkload.hpp
+    NeonResizeBilinearWorkload.cpp
+    NeonResizeBilinearWorkload.hpp
     NeonSoftmaxBaseWorkload.cpp
     NeonSoftmaxBaseWorkload.hpp
     NeonSoftmaxFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
new file mode 100644 (file)
index 0000000..37f97bf
--- /dev/null
@@ -0,0 +1,59 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonResizeBilinearWorkload.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+#include <neon/NeonLayerSupport.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInputInfo  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEScale::validate(&aclInputInfo,
+                                          &aclOutputInfo,
+                                          arm_compute::InterpolationPolicy::BILINEAR,
+                                          arm_compute::BorderMode::REPLICATE,
+                                          arm_compute::PixelValue(0.f),
+                                          arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+NeonResizeBilinearWorkload::NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info)
+    : BaseWorkload<ResizeBilinearQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonResizeBilinearWorkload", 1, 1);
+
+    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    input.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    m_ResizeBilinearLayer.configure(&input,
+                                    &output,
+                                    arm_compute::InterpolationPolicy::BILINEAR,
+                                    arm_compute::BorderMode::REPLICATE,
+                                    arm_compute::PixelValue(0.f),
+                                    arm_compute::SamplingPolicy::TOP_LEFT);
+};
+
+void NeonResizeBilinearWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonResizeBilinearWorkload_Execute");
+    m_ResizeBilinearLayer.run();
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp
new file mode 100644 (file)
index 0000000..21753b3
--- /dev/null
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEScale.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class NeonResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor>
+{
+public:
+    NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    mutable arm_compute::NEScale m_ResizeBilinearLayer;
+};
+
+} //namespace armnn
index 70f9e37..e034cc9 100644 (file)
@@ -25,6 +25,7 @@
 #include "NeonPermuteWorkload.hpp"
 #include "NeonPooling2dWorkload.hpp"
 #include "NeonReshapeWorkload.hpp"
+#include "NeonResizeBilinearWorkload.hpp"
 #include "NeonSoftmaxFloatWorkload.hpp"
 #include "NeonSoftmaxUint8Workload.hpp"
 #include "NeonSplitterWorkload.hpp"
index ce81f8d..25c2baf 100644 (file)
@@ -513,8 +513,10 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
 }
 
 bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
+    ignore_unused(output);
     return IsSupportedForDataTypeRef(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
index 01abc73..5fe111b 100644 (file)
@@ -193,6 +193,7 @@ public:
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsResizeBilinearSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsRsqrtSupported(const TensorInfo& input,