IVGCVSW-3296 Add CL backend support for ResizeNearestNeighbour
author Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Tue, 2 Jul 2019 16:25:47 +0000 (17:25 +0100)
committer Áron Virginás-Tar <aron.virginas-tar@arm.com>
Fri, 12 Jul 2019 14:44:27 +0000 (14:44 +0000)
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I7f4c722141837939fd8904c52e75704a15c8a5e3

14 files changed:
src/backends/aclCommon/ArmComputeUtils.hpp
src/backends/backendsCommon/test/LayerTests.hpp
src/backends/cl/ClLayerSupport.cpp
src/backends/cl/ClWorkloadFactory.cpp
src/backends/cl/backend.mk
src/backends/cl/test/ClCreateWorkloadTests.cpp
src/backends/cl/test/ClLayerTests.cpp
src/backends/cl/workloads/CMakeLists.txt
src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp [deleted file]
src/backends/cl/workloads/ClResizeBilinearFloatWorkload.hpp [deleted file]
src/backends/cl/workloads/ClResizeWorkload.cpp [new file with mode: 0644]
src/backends/cl/workloads/ClResizeWorkload.hpp [new file with mode: 0644]
src/backends/cl/workloads/ClWorkloads.hpp
src/backends/reference/test/RefLayerTests.cpp

index 5b8f983..0f56160 100644 (file)
@@ -122,6 +122,19 @@ ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnected
     return fc_info;
 }
 
+inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
+{
+    switch (resizeMethod)
+    {
+        case ResizeMethod::Bilinear:
+            return arm_compute::InterpolationPolicy::BILINEAR;
+        case ResizeMethod::NearestNeighbor:
+            return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
+        default:
+            throw InvalidArgumentException("Unsupported resize method");
+    }
+}
+
 inline unsigned int ComputeSoftmaxAclAxis(const armnn::TensorInfo& tensor)
 {
     unsigned int dim = tensor.GetNumDimensions();
index 7db8471..d6747f5 100644 (file)
@@ -950,7 +950,11 @@ template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        const armnn::DataLayout  dataLayout);
+        const armnn::DataLayout  dataLayout,
+        float inQuantScale,
+        int32_t inQuantOffset,
+        float outQuantScale,
+        int32_t outQuantOffset);
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Rsqrt2dTestCommon(
@@ -3391,7 +3395,11 @@ template<armnn::DataType ArmnnType, typename T>
 LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        const armnn::DataLayout dataLayout)
+        const armnn::DataLayout dataLayout,
+        float inQuantScale,
+        int32_t inQuantOffset,
+        float outQuantScale,
+        int32_t outQuantOffset)
 {
     armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
                                         ?  armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
@@ -3402,46 +3410,46 @@ LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
 
     if (armnn::IsQuantizedType<T>())
     {
-        inputTensorInfo.SetQuantizationScale(0.010765f);
-        inputTensorInfo.SetQuantizationOffset(7);
-        outputTensorInfo.SetQuantizationScale(0.010132f);
-        outputTensorInfo.SetQuantizationOffset(-18);
+        inputTensorInfo.SetQuantizationScale(inQuantScale);
+        inputTensorInfo.SetQuantizationOffset(inQuantOffset);
+        outputTensorInfo.SetQuantizationScale(outQuantScale);
+        outputTensorInfo.SetQuantizationOffset(outQuantOffset);
     }
 
     std::vector<float> inputData = armnn::IsQuantizedType<T>()
                                    ? std::initializer_list<float>
-                                           {
-                                                   0.183005f, 2.379065f, // 24, 228, : Expected quantised values
-                                                   1.05497f, 1.302565f, // 105, 128,
-                                                   2.400595f, 0.68896f // 230, 71
-                                           }
+                                        {
+                                            0.183005f, 2.379065f, //  24, 228, : expected quantised values
+                                            1.054970f, 1.302565f, // 105, 128,
+                                            2.400595f, 0.688960f  // 230, 71
+                                        }
                                    : std::initializer_list<float>
-                                           {
-                                                   1.0f,   2.0f,
-                                                   13.0f,  21.0f,
-                                                   144.0f, 233.0f,
+                                        {
+                                               1.0f,   2.0f,
+                                              13.0f,  21.0f,
+                                            144.0f, 233.0f,
 
-                                                   233.0f, 144.0f,
-                                                   21.0f,  13.0f,
-                                                   2.0f,   1.0f
-                                           };
+                                            233.0f, 144.0f,
+                                             21.0f,  13.0f,
+                                              2.0f,   1.0f
+                                        };
     std::vector<float> outputData = armnn::IsQuantizedType<T>()
                                     ? std::initializer_list<float>
-                                            {
-                                                    0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f,
-                                                    1.05497f,  1.05497f,  1.05497f,  1.302565f, 1.302565f,
-                                                    2.400595f, 2.400595f, 2.400595f, 0.68896f,  0.68896f
-                                            }
+                                        {
+                                            0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f,
+                                            1.054970f, 1.054970f, 1.054970f, 1.302565f, 1.302565f,
+                                            2.400595f, 2.400595f, 2.400595f, 0.688960f, 0.688960f
+                                        }
                                     : std::initializer_list<float>
-                                            {
-                                                      1.f,   1.f,   1.f,   2.f,   2.f,
-                                                     13.f,  13.f,  13.f,  21.f,  21.f,
-                                                    144.f, 144.f, 144.f, 233.f, 233.f,
+                                        {
+                                              1.f,   1.f,   1.f,   2.f,   2.f,
+                                             13.f,  13.f,  13.f,  21.f,  21.f,
+                                            144.f, 144.f, 144.f, 233.f, 233.f,
 
-                                                    233.f, 233.f, 233.f, 144.f, 144.f,
-                                                     21.f,  21.f,  21.f,  13.f,  13.f,
-                                                      2.f,   2.f,   2.f,   1.f,   1.f
-                                            };
+                                            233.f, 233.f, 233.f, 144.f, 144.f,
+                                             21.f,  21.f,  21.f,  13.f,  13.f,
+                                              2.f,   2.f,   2.f,   1.f,   1.f
+                                        };
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     if (dataLayout == armnn::DataLayout::NHWC)
@@ -3488,7 +3496,6 @@ LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
     return result;
 }
 
-
 template<armnn::DataType ArmnnType, typename T, std::size_t InputDim, std::size_t OutputDim>
 LayerTestResult<T, OutputDim> MeanTestHelper(
         armnn::IWorkloadFactory& workloadFactory,
index 6d9b197..7eb1dcf 100644 (file)
@@ -40,6 +40,7 @@
 #include "workloads/ClPermuteWorkload.hpp"
 #include "workloads/ClPooling2dWorkload.hpp"
 #include "workloads/ClPreluWorkload.hpp"
 #include "workloads/ClQuantizeWorkload.hpp"
+#include "workloads/ClResizeWorkload.hpp"
 #include "workloads/ClSoftmaxBaseWorkload.hpp"
 #include "workloads/ClSpaceToBatchNdWorkload.hpp"
@@ -570,28 +571,22 @@ bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                        const ResizeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(output);
-
-    if (descriptor.m_Method == ResizeMethod::Bilinear)
-    {
-        return IsSupportedForDataTypeCl(reasonIfUnsupported,
-                                        input.GetDataType(),
-                                        &TrueFunc<>,
-                                        &FalseFuncU8<>);
-    }
-
-    return false;
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(output);
-    return IsSupportedForDataTypeCl(reasonIfUnsupported,
-                                    input.GetDataType(),
-                                    &TrueFunc<>,
-                                    &FalseFuncU8<>);
+    ResizeDescriptor descriptor;
+    descriptor.m_Method     = ResizeMethod::Bilinear;
+    descriptor.m_DataLayout = DataLayout::NCHW;
+
+    const TensorShape& outputShape = output.GetShape();
+    descriptor.m_TargetHeight = outputShape[2];
+    descriptor.m_TargetWidth  = outputShape[3];
+
+    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
 }
 
 bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
index 506acb4..6ce87d8 100644 (file)
@@ -260,27 +260,23 @@ std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopy
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
 {
-    if (descriptor.m_Parameters.m_Method == ResizeMethod::Bilinear)
-    {
-        ResizeBilinearQueueDescriptor resizeBilinearDescriptor;
-        resizeBilinearDescriptor.m_Inputs  = descriptor.m_Inputs;
-        resizeBilinearDescriptor.m_Outputs = descriptor.m_Outputs;
-
-        resizeBilinearDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
-        resizeBilinearDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
-        resizeBilinearDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-
-        return MakeWorkload<ClResizeBilinearFloatWorkload, NullWorkload>(resizeBilinearDescriptor, info);
-    }
-
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkload<ClResizeWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateResizeBilinear(
     const ResizeBilinearQueueDescriptor& descriptor,
     const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClResizeBilinearFloatWorkload, NullWorkload>(descriptor, info);
+    ResizeQueueDescriptor resizeDescriptor;
+    resizeDescriptor.m_Inputs  = descriptor.m_Inputs;
+    resizeDescriptor.m_Outputs = descriptor.m_Outputs;
+
+    resizeDescriptor.m_Parameters.m_Method       = ResizeMethod::Bilinear;
+    resizeDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
+    resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+    resizeDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
+
+    return CreateResize(resizeDescriptor, info);
 }
 
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFakeQuantization(
index 1bc1fb3..57d7cb9 100644 (file)
@@ -48,7 +48,7 @@ BACKEND_SOURCES := \
         workloads/ClPreluWorkload.cpp \
         workloads/ClQuantizeWorkload.cpp \
         workloads/ClReshapeWorkload.cpp \
-        workloads/ClResizeBilinearFloatWorkload.cpp \
+        workloads/ClResizeWorkload.cpp \
         workloads/ClSoftmaxBaseWorkload.cpp \
         workloads/ClSoftmaxFloatWorkload.cpp \
         workloads/ClSoftmaxUint8Workload.cpp \
index b89abdb..de13390 100644 (file)
@@ -758,16 +758,15 @@ BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
     ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
 }
 
-template <typename ResizeBilinearWorkloadType, typename armnn::DataType DataType>
-static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
+template <typename ResizeWorkloadType, typename armnn::DataType DataType>
+static void ClResizeWorkloadTest(DataLayout dataLayout)
 {
     Graph graph;
     ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
 
-    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
+    auto workload = CreateResizeBilinearWorkloadTest<ResizeWorkloadType, DataType>(factory, graph, dataLayout);
 
-    // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
     auto queueDescriptor = workload->GetData();
 
     auto inputHandle  = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
@@ -786,24 +785,34 @@ static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
     }
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload)
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
 {
-    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload)
+BOOST_AUTO_TEST_CASE(CreateResizeFloat16NchwWorkload)
 {
-    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload)
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
 {
-    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload)
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
 {
-    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeFloat16NhwcWorkload)
+{
+    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+{
+    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
 }
 
 template <typename MeanWorkloadType, typename armnn::DataType DataType>
index c786244..f2ff294 100644 (file)
@@ -305,30 +305,6 @@ ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dTest, armnn::DataLa
 ARMNN_AUTO_TEST_CASE(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, armnn::DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, armnn::DataLayout::NCHW)
 
-// Resize Bilinear - NCHW data layout
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
-
-// Resize Bilinear - NHWC data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
-                     ResizeBilinearNopTest<armnn::DataType::Float32>,
-                     armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
-                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
-                     armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
-                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
-                     armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
-                     ResizeBilinearMinTest<armnn::DataType::Float32>,
-                     armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
-                     ResizeBilinearMagTest<armnn::DataType::Float32>,
-                     armnn::DataLayout::NHWC)
-
 // Constant
 ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
 ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8SimpleQuantizationScaleNoOffsetTest)
@@ -539,6 +515,122 @@ ARMNN_AUTO_TEST_CASE(StridedSlice3DReverseUint8, StridedSlice3DReverseUint8Test)
 ARMNN_AUTO_TEST_CASE(StridedSlice2DUint8, StridedSlice2DUint8Test)
 ARMNN_AUTO_TEST_CASE(StridedSlice2DReverseUint8, StridedSlice2DReverseUint8Test)
 
+// Resize Bilinear - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+
+// Resize Bilinear - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+
+// Resize NearestNeighbor - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
+                     ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
+                     ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
+                     ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
+                     ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
+                     ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
+                     ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+
+// Resize NearestNeighbor - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
+                     ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
+                     ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
+                     ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
+                     ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
+                     ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
+                     ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+
 // Quantize
 ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
index d98956f..2a3b1ad 100644 (file)
@@ -60,8 +60,8 @@ list(APPEND armnnClBackendWorkloads_sources
     ClQuantizeWorkload.hpp
     ClReshapeWorkload.cpp
     ClReshapeWorkload.hpp
-    ClResizeBilinearFloatWorkload.cpp
-    ClResizeBilinearFloatWorkload.hpp
+    ClResizeWorkload.cpp
+    ClResizeWorkload.hpp
     ClSoftmaxBaseWorkload.cpp
     ClSoftmaxBaseWorkload.hpp
     ClSoftmaxFloatWorkload.cpp
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.cpp
deleted file mode 100644 (file)
index ac7d60c..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClResizeBilinearFloatWorkload.hpp"
-#include <cl/ClTensorHandle.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
-#include <cl/ClLayerSupport.hpp>
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-
-#include "ClWorkloadUtils.hpp"
-
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-ClResizeBilinearFloatWorkload::ClResizeBilinearFloatWorkload(const ResizeBilinearQueueDescriptor& descriptor,
-                                                             const WorkloadInfo& info)
-    : FloatWorkload<ResizeBilinearQueueDescriptor>(descriptor, info)
-{
-    m_Data.ValidateInputsOutputs("ClResizeBilinearFloatWorkload", 1, 1);
-
-    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
-    input.info()->set_data_layout(aclDataLayout);
-    output.info()->set_data_layout(aclDataLayout);
-
-    m_ResizeBilinearLayer.configure(&input, &output, arm_compute::InterpolationPolicy::BILINEAR,
-                                    arm_compute::BorderMode::REPLICATE, arm_compute::PixelValue(0.f),
-                                    arm_compute::SamplingPolicy::TOP_LEFT);
-};
-
-void ClResizeBilinearFloatWorkload::Execute() const
-{
-    ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeBilinearFloatWorkload_Execute");
-    RunClFunction(m_ResizeBilinearLayer, CHECK_LOCATION());
-}
-
-} //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.hpp b/src/backends/cl/workloads/ClResizeBilinearFloatWorkload.hpp
deleted file mode 100644 (file)
index 07ddcec..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
-namespace armnn
-{
-
-class ClResizeBilinearFloatWorkload : public FloatWorkload<ResizeBilinearQueueDescriptor>
-{
-public:
-    ClResizeBilinearFloatWorkload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
-    void Execute() const override;
-
-private:
-    mutable arm_compute::CLScale m_ResizeBilinearLayer;
-};
-
-} //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeWorkload.cpp b/src/backends/cl/workloads/ClResizeWorkload.cpp
new file mode 100644 (file)
index 0000000..3c9c3aa
--- /dev/null
@@ -0,0 +1,74 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClResizeWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <cl/ClLayerSupport.hpp>
+#include <cl/ClTensorHandle.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status ClResizeWorkloadValidate(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const ResizeDescriptor& descriptor)
+{
+    arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input);
+    arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(descriptor.m_DataLayout);
+    aclInputInfo.set_data_layout(aclDataLayout);
+    aclOutputInfo.set_data_layout(aclDataLayout);
+
+    arm_compute::InterpolationPolicy aclInterpolationPolicy =
+        ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Method);
+
+    return arm_compute::CLScale::validate(&aclInputInfo,
+                                          &aclOutputInfo,
+                                          aclInterpolationPolicy,
+                                          arm_compute::BorderMode::REPLICATE,
+                                          arm_compute::PixelValue(0.f),
+                                          arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+ClResizeWorkload::ClResizeWorkload(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info) :
+    BaseWorkload<ResizeQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("ClResizeWorkload", 1, 1);
+
+    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    input.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    arm_compute::InterpolationPolicy aclInterpolationPolicy =
+        ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Parameters.m_Method);
+
+    m_ResizeLayer.configure(&input,
+                            &output,
+                            aclInterpolationPolicy,
+                            arm_compute::BorderMode::REPLICATE,
+                            arm_compute::PixelValue(0.f),
+                            arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+void ClResizeWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClResizeWorkload_Execute");
+    RunClFunction(m_ResizeLayer, CHECK_LOCATION());
+}
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClResizeWorkload.hpp b/src/backends/cl/workloads/ClResizeWorkload.hpp
new file mode 100644 (file)
index 0000000..5a128fa
--- /dev/null
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClResizeWorkloadValidate(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             const ResizeDescriptor& descriptor);
+
+class ClResizeWorkload : public BaseWorkload<ResizeQueueDescriptor>
+{
+public:
+    ClResizeWorkload(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    mutable arm_compute::CLScale m_ResizeLayer;
+};
+
+} // namespace armnn
index 256b68c..a64dea2 100644 (file)
@@ -30,7 +30,7 @@
 #include "ClPreluWorkload.hpp"
 #include "ClQuantizeWorkload.hpp"
 #include "ClReshapeWorkload.hpp"
-#include "ClResizeBilinearFloatWorkload.hpp"
+#include "ClResizeWorkload.hpp"
 #include "ClSoftmaxFloatWorkload.hpp"
 #include "ClSoftmaxUint8Workload.hpp"
 #include "ClSpaceToBatchNdWorkload.hpp"
index 447d95e..57ac946 100644 (file)
@@ -680,10 +680,10 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
                      armnn::DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
                      ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
-                     armnn::DataLayout::NCHW)
+                     armnn::DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
                      ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
-                     armnn::DataLayout::NCHW)
+                     armnn::DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
                      SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>,
                      armnn::DataLayout::NCHW)
@@ -727,10 +727,10 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
                      armnn::DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
                      ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
-                     armnn::DataLayout::NHWC)
+                     armnn::DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
                      ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
-                     armnn::DataLayout::NHWC)
+                     armnn::DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16Nhwc,
                      ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>,
                      armnn::DataLayout::NHWC)