IVGCVSW-3297 Add Neon backend support for ResizeNearestNeighbour
author: Ellen Norris-Thompson <ellen.norris-thompson@arm.com>
date: Mon, 15 Jul 2019 13:23:30 +0000 (14:23 +0100)
committer: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
committed: Wed, 17 Jul 2019 16:36:30 +0000 (16:36 +0000)
 * Renamed NeonResizeBilinearWorkload.* to NeonResizeWorkload.* and added support for ResizeNearestNeighbour
 * Added CreateWorkload and LayerTests for Neon backend

Signed-off-by: Ellen Norris-Thompson <ellen.norris-thompson@arm.com>
Change-Id: I72f5340608a0928f8b32a41d1915ee2c35849f18

src/backends/neon/NeonLayerSupport.cpp
src/backends/neon/NeonWorkloadFactory.cpp
src/backends/neon/backend.mk
src/backends/neon/test/NeonCreateWorkloadTests.cpp
src/backends/neon/test/NeonLayerTests.cpp
src/backends/neon/workloads/CMakeLists.txt
src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp [deleted file]
src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp [deleted file]
src/backends/neon/workloads/NeonResizeWorkload.cpp [new file with mode: 0644]
src/backends/neon/workloads/NeonResizeWorkload.hpp [new file with mode: 0644]
src/backends/neon/workloads/NeonWorkloads.hpp

index ea875f6..cb709c3 100644 (file)
@@ -39,7 +39,7 @@
 #include "workloads/NeonPooling2dWorkload.hpp"
 #include "workloads/NeonPreluWorkload.hpp"
 #include "workloads/NeonQuantizeWorkload.hpp"
-#include "workloads/NeonResizeBilinearWorkload.hpp"
+#include "workloads/NeonResizeWorkload.hpp"
 #include "workloads/NeonSoftmaxBaseWorkload.hpp"
 #include "workloads/NeonSpaceToDepthWorkload.hpp"
 #include "workloads/NeonSplitterWorkload.hpp"
@@ -511,25 +511,26 @@ bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
                                          const ResizeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    if (descriptor.m_Method == ResizeMethod::Bilinear)
-    {
-        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
-                                       reasonIfUnsupported,
-                                       input,
-                                       output);
-    }
-
-    return false;
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
 }
 
 bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input,
-                                   output);
+    ResizeDescriptor descriptor;
+    descriptor.m_Method     = ResizeMethod::Bilinear;
+    descriptor.m_DataLayout = DataLayout::NCHW;
+
+    const TensorShape& outputShape = output.GetShape();
+    descriptor.m_TargetHeight = outputShape[2];
+    descriptor.m_TargetWidth  = outputShape[3];
+
+    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
index eadd636..c50eaec 100644 (file)
@@ -227,27 +227,22 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCo
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
 {
-    if (descriptor.m_Parameters.m_Method == ResizeMethod::Bilinear)
-    {
-        ResizeBilinearQueueDescriptor resizeBilinearDescriptor;
-        resizeBilinearDescriptor.m_Inputs  = descriptor.m_Inputs;
-        resizeBilinearDescriptor.m_Outputs = descriptor.m_Outputs;
-
-        resizeBilinearDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
-        resizeBilinearDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
-        resizeBilinearDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-
-        return std::make_unique<NeonResizeBilinearWorkload>(resizeBilinearDescriptor, info);
-    }
-
-    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+    return std::make_unique<NeonResizeWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
     const ResizeBilinearQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return std::make_unique<NeonResizeBilinearWorkload>(descriptor, info);
+    ResizeQueueDescriptor resizeDescriptor;
+    resizeDescriptor.m_Inputs  = descriptor.m_Inputs;
+    resizeDescriptor.m_Outputs = descriptor.m_Outputs;
+
+    resizeDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
+    resizeDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
+    resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+
+    return CreateResize(resizeDescriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
index 9b0c188..305abfc 100644 (file)
@@ -45,7 +45,7 @@ BACKEND_SOURCES := \
         workloads/NeonPreluWorkload.cpp \
         workloads/NeonQuantizeWorkload.cpp \
         workloads/NeonReshapeWorkload.cpp \
-        workloads/NeonResizeBilinearWorkload.cpp \
+        workloads/NeonResizeWorkload.cpp \
         workloads/NeonSoftmaxBaseWorkload.cpp \
         workloads/NeonSoftmaxFloatWorkload.cpp \
         workloads/NeonSoftmaxUint8Workload.cpp \
index 49c5a72..ac7eb25 100644 (file)
@@ -19,6 +19,12 @@ BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
 namespace
 {
 
+boost::test_tools::predicate_result CompareIAclTensorHandleShape(IAclTensorHandle*                    tensorHandle,
+                                                                std::initializer_list<unsigned int> expectedDimensions)
+{
+    return CompareTensorHandleShape<IAclTensorHandle>(tensorHandle, expectedDimensions);
+}
+
 bool TestNeonTensorHandleInfo(armnn::IAclTensorHandle* handle, const armnn::TensorInfo& expectedInfo)
 {
     using namespace armnn::armcomputetensorutils;
@@ -493,6 +499,52 @@ BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
     NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
 }
 
+template <typename ResizeWorkloadType, armnn::DataType DataType>
+static void NeonCreateResizeWorkloadTest(DataLayout dataLayout)
+{
+    Graph graph;
+    NeonWorkloadFactory factory =
+            NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
+    auto workload = CreateResizeBilinearWorkloadTest<ResizeWorkloadType, DataType>(factory, graph, dataLayout);
+
+    auto queueDescriptor = workload->GetData();
+
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+    switch (dataLayout)
+    {
+        case DataLayout::NHWC:
+            BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
+            BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+            break;
+        case DataLayout::NCHW:
+        default:
+            BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
+            BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+    }
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
+{
+    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
+{
+    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
+{
+    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+{
+    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+}
+
 template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
 static void NeonCreateSoftmaxWorkloadTest()
 {
index 586e994..aeebd4f 100644 (file)
@@ -616,6 +616,72 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
                      ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
                      armnn::DataLayout::NHWC)
 
+// Resize NearestNeighbor - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
+                     ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
+                     ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
+                     ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
+                     ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
+                     ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
+                     ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+
+// Resize NearestNeighbor - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
+                     ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
+                     ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
+                     ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
+                     ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
+                     ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
+                     ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+
 // Quantize
 ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
index 0ad961a..7bde808 100644 (file)
@@ -56,8 +56,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonQuantizeWorkload.hpp
     NeonReshapeWorkload.cpp
     NeonReshapeWorkload.hpp
-    NeonResizeBilinearWorkload.cpp
-    NeonResizeBilinearWorkload.hpp
+    NeonResizeWorkload.cpp
+    NeonResizeWorkload.hpp
     NeonSoftmaxBaseWorkload.cpp
     NeonSoftmaxBaseWorkload.hpp
     NeonSoftmaxFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
deleted file mode 100644 (file)
index 6943d87..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonResizeBilinearWorkload.hpp"
-
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
-#include <neon/NeonTensorHandle.hpp>
-#include <neon/NeonLayerSupport.hpp>
-
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
-{
-    const arm_compute::TensorInfo aclInputInfo  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
-    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
-    return arm_compute::NEScale::validate(&aclInputInfo,
-                                          &aclOutputInfo,
-                                          arm_compute::InterpolationPolicy::BILINEAR,
-                                          arm_compute::BorderMode::REPLICATE,
-                                          arm_compute::PixelValue(0.f),
-                                          arm_compute::SamplingPolicy::TOP_LEFT);
-}
-
-NeonResizeBilinearWorkload::NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info)
-    : BaseWorkload<ResizeBilinearQueueDescriptor>(descriptor, info)
-{
-    m_Data.ValidateInputsOutputs("NeonResizeBilinearWorkload", 1, 1);
-
-    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
-    input.info()->set_data_layout(aclDataLayout);
-    output.info()->set_data_layout(aclDataLayout);
-
-    m_ResizeBilinearLayer.configure(&input,
-                                    &output,
-                                    arm_compute::InterpolationPolicy::BILINEAR,
-                                    arm_compute::BorderMode::REPLICATE,
-                                    arm_compute::PixelValue(0.f),
-                                    arm_compute::SamplingPolicy::TOP_LEFT);
-};
-
-void NeonResizeBilinearWorkload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonResizeBilinearWorkload_Execute");
-    m_ResizeBilinearLayer.run();
-}
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp
deleted file mode 100644 (file)
index 21753b3..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-
-#include <neon/workloads/NeonWorkloadUtils.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NEScale.h>
-
-namespace armnn
-{
-
-arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
-
-class NeonResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor>
-{
-public:
-    NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
-    void Execute() const override;
-
-private:
-    mutable arm_compute::NEScale m_ResizeBilinearLayer;
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeWorkload.cpp b/src/backends/neon/workloads/NeonResizeWorkload.cpp
new file mode 100644 (file)
index 0000000..a4e4a4a
--- /dev/null
@@ -0,0 +1,72 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonResizeWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const ResizeDescriptor& descriptor)
+{
+    arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input);
+    arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(descriptor.m_DataLayout);
+    aclInputInfo.set_data_layout(aclDataLayout);
+    aclOutputInfo.set_data_layout(aclDataLayout);
+
+    arm_compute::InterpolationPolicy aclInterpolationPolicy =
+            ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Method);
+
+    return arm_compute::NEScale::validate(&aclInputInfo,
+                                          &aclOutputInfo,
+                                          aclInterpolationPolicy,
+                                          arm_compute::BorderMode::REPLICATE,
+                                          arm_compute::PixelValue(0.f),
+                                          arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+NeonResizeWorkload::NeonResizeWorkload(const ResizeQueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info)
+    : BaseWorkload<ResizeQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonResizeWorkload", 1, 1);
+
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    input.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    arm_compute::InterpolationPolicy aclInterpolationPolicy =
+            ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Parameters.m_Method);
+
+    m_ResizeLayer.configure(&input,
+                            &output,
+                            aclInterpolationPolicy,
+                            arm_compute::BorderMode::REPLICATE,
+                            arm_compute::PixelValue(0.f),
+                            arm_compute::SamplingPolicy::TOP_LEFT);
+};
+
+void NeonResizeWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonResizeWorkload_Execute");
+    m_ResizeLayer.run();
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeWorkload.hpp b/src/backends/neon/workloads/NeonResizeWorkload.hpp
new file mode 100644 (file)
index 0000000..69995c6
--- /dev/null
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEScale.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const ResizeDescriptor& descriptor);
+
+class NeonResizeWorkload : public BaseWorkload<ResizeQueueDescriptor>
+{
+public:
+    NeonResizeWorkload(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    mutable arm_compute::NEScale m_ResizeLayer;
+};
+
+} //namespace armnn
index a9604a1..c5f4a54 100644 (file)
@@ -30,7 +30,7 @@
 #include "NeonPreluWorkload.hpp"
 #include "NeonQuantizeWorkload.hpp"
 #include "NeonReshapeWorkload.hpp"
-#include "NeonResizeBilinearWorkload.hpp"
+#include "NeonResizeWorkload.hpp"
 #include "NeonSoftmaxFloatWorkload.hpp"
 #include "NeonSoftmaxUint8Workload.hpp"
 #include "NeonSpaceToDepthWorkload.hpp"