IVGCVSW-1932: Add Unit tests for NHWC ResizeBilinear
author: James Conroy <james.conroy@arm.com>
Wed, 3 Oct 2018 08:32:03 +0000 (09:32 +0100)
committer: Matthew Bentham <matthew.bentham@arm.com>
Wed, 10 Oct 2018 15:16:58 +0000 (16:16 +0100)
* Adds five unit tests that execute ResizeBilinear
  with the NHWC data layout and Float32 data type.
* Refactors original ResizeBilinear Float32 tests
  to take DataLayout as a parameter.
* Adds four unit tests that execute CreateWorkloadCl
  for both NCHW and NHWC (NCHW tests did not exist
  for CreateWorkloadCl).

Change-Id: I1af419ed0b62b8f4d4550f6d120a584a0a223b17

src/armnn/test/CreateWorkload.hpp
src/backends/test/ArmComputeCl.cpp
src/backends/test/CreateWorkloadCl.cpp
src/backends/test/LayerTests.cpp
src/backends/test/LayerTests.hpp

index 66f6282..f2c8b5a 100644 (file)
@@ -807,13 +807,33 @@ void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory&
 
 template <typename ResizeBilinearWorkload, armnn::DataType DataType>
 std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
-    armnn::Graph& graph)
+                                                                         armnn::Graph& graph,
+                                                                         DataLayout dataLayout = DataLayout::NCHW)
 {
+    TensorShape inputShape;
+    TensorShape outputShape;
+    unsigned int heightIndex;
+    unsigned int widthIndex;
+
+    switch (dataLayout) {
+        case DataLayout::NHWC:
+            inputShape = { 2, 4, 4, 3 };
+            outputShape = { 2, 2, 2, 3 };
+            heightIndex = 1;
+            widthIndex = 2;
+            break;
+        default: // NCHW
+            inputShape = { 2, 3, 4, 4 };
+            outputShape = { 2, 3, 2, 2 };
+            heightIndex = 2;
+            widthIndex = 3;
+    }
+
     // Creates the layer we're testing.
-    TensorShape outputShape({ 2, 3, 2, 2 });
     ResizeBilinearDescriptor resizeDesc;
-    resizeDesc.m_TargetWidth = outputShape[3];
-    resizeDesc.m_TargetHeight = outputShape[2];
+    resizeDesc.m_TargetWidth = outputShape[widthIndex];
+    resizeDesc.m_TargetHeight = outputShape[heightIndex];
+    resizeDesc.m_DataLayout = dataLayout;
     Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");
 
     // Creates extra layers.
@@ -821,7 +841,7 @@ std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
     // Connects up.
-    armnn::TensorInfo inputTensorInfo({ 2, 3, 4, 4 }, DataType);
+    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
     armnn::TensorInfo outputTensorInfo(outputShape, DataType);
     Connect(input, layer, inputTensorInfo);
     Connect(layer, output, outputTensorInfo);
@@ -833,6 +853,7 @@ std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::
     ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
     BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
     BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
index d432a26..faa3d4f 100644 (file)
@@ -187,13 +187,20 @@ ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest)
 ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest)
 ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest)
 
-// Resize Bilinear
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
+// Resize Bilinear - NCHW data layout
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest)
 
+// Resize Bilinear - NHWC data layout
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest)
+
 // Constant
 ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
 ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantTestUint8)
index e81f844..e7e39b0 100644 (file)
@@ -596,5 +596,49 @@ BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
     ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
 }
 
+template <typename ResizeBilinearWorkloadType, typename armnn::DataType DataType>
+static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
+{
+    Graph graph;
+    ClWorkloadFactory factory;
+
+    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
+
+    // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
+    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
+    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+    switch (dataLayout)
+    {
+        case DataLayout::NHWC:
+            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
+            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+            break;
+        default: // NCHW
+            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
+            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+    }
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload)
+{
+    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload)
+{
+    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload)
+{
+    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload)
+{
+    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+}
 
 BOOST_AUTO_TEST_SUITE_END()
index 066d0c2..a7fb6a8 100644 (file)
@@ -2907,22 +2907,12 @@ LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadF
     return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
 }
 
-LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                    const armnn::TensorShape& inputOutputTensorShape,
+                                                    armnn::DataLayout dataLayout)
 {
-    constexpr unsigned int inputWidth = 4;
-    constexpr unsigned int inputHeight = 4;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = inputWidth;
-    constexpr unsigned int outputHeight = inputHeight;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
 
     auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
         1.0f, 2.0f, 3.0f, 4.0f,
@@ -2938,6 +2928,7 @@ LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloa
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -2955,26 +2946,33 @@ LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloa
     return result;
 }
 
-LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
 {
-    constexpr unsigned int inputWidth = 2;
-    constexpr unsigned int inputHeight = 2;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
+    // BatchSize = 1, Channels = 1, Height = 4, Width = 4
+    const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };
 
-    constexpr unsigned int outputWidth = inputWidth / 2;
-    constexpr unsigned int outputHeight = inputHeight / 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
+    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
+}
 
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // BatchSize = 1, Height = 4, Width = 4, Channels = 1
+    const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };
+
+    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                       const armnn::TensorShape& inputTensorShape,
+                                                       const armnn::TensorShape& outputTensorShape,
+                                                       armnn::DataLayout dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
 
     auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-        1.0f, 255.0f,
-        200.0f, 250.f,
+          1.0f, 255.0f,
+        200.0f, 250.0f
     }));
 
     // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
@@ -2991,6 +2989,7 @@ LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& work
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3008,22 +3007,35 @@ LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& work
     return result;
 }
 
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
 {
-    constexpr unsigned int inputWidth = 4;
-    constexpr unsigned int inputHeight = 4;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
+    // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
+    const armnn::TensorShape inputShape{ 1, 1, 2, 2 };
 
-    constexpr unsigned int outputWidth = inputWidth / 2;
-    constexpr unsigned int outputHeight = inputHeight / 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
+    // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
+    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
 
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
+    const armnn::TensorShape inputShape{ 1, 2, 2, 1 };
+
+    // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
+    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
+
+    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                      const armnn::TensorShape& inputTensorShape,
+                                                      const armnn::TensorShape& outputTensorShape,
+                                                      armnn::DataLayout dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
 
     auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
         1.0f, 2.0f, 3.0f, 4.0f,
@@ -3034,14 +3046,15 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workl
 
     LayerTestResult<float, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-        1.f, 3.f,
-        3.f, 5.f
+        1.0f, 3.0f,
+        3.0f, 5.0f
     }));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3059,22 +3072,35 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workl
     return result;
 }
 
-LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
 {
-    constexpr unsigned int inputWidth = 5;
-    constexpr unsigned int inputHeight = 3;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
+    // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
+    const armnn::TensorShape inputShape{ 1, 1, 4, 4 };
 
-    constexpr unsigned int outputWidth = 3;
-    constexpr unsigned int outputHeight = 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
+    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
+    const armnn::TensorShape outputShape{ 1, 1, 2, 2 };
 
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
+    const armnn::TensorShape inputShape{ 1, 4, 4, 1 };
+
+    // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
+    const armnn::TensorShape outputShape{ 1, 2, 2, 1 };
+
+    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                    const armnn::TensorShape& inputTensorShape,
+                                                    const armnn::TensorShape& outputTensorShape,
+                                                    armnn::DataLayout dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
 
     auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
           1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
@@ -3084,14 +3110,15 @@ LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloa
 
     LayerTestResult<float, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-        1.0f, 2.6666f, 6.0f,
-        78.5f, 179.3333f, 401.f
+         1.0f,   2.6666f,   6.0f,
+        78.5f, 179.3333f, 401.0f
     }));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3109,22 +3136,35 @@ LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloa
     return result;
 }
 
-LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
 {
-    constexpr unsigned int inputWidth = 2;
-    constexpr unsigned int inputHeight = 3;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
+    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
+    const armnn::TensorShape inputShape{ 1, 1, 3, 5 };
 
-    constexpr unsigned int outputWidth = 5;
-    constexpr unsigned int outputHeight = 3;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
+    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
+    const armnn::TensorShape outputShape{ 1, 1, 2, 3 };
 
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
+    const armnn::TensorShape inputShape{ 1, 3, 5, 1 };
+
+    // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
+    const armnn::TensorShape outputShape{ 1, 2, 3, 1 };
+
+    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                    const armnn::TensorShape& inputTensorShape,
+                                                    const armnn::TensorShape& outputTensorShape,
+                                                    armnn::DataLayout dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
 
     auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
           1.0f,   2.0f,
@@ -3134,15 +3174,16 @@ LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloa
 
     LayerTestResult<float, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-         1.0f,   1.4f,   1.8f,   2.f,   2.f,
-         13.f,  16.2f,  19.4f,  21.f,  21.f,
-        144.f, 179.6f, 215.2f, 233.f, 233.f
+          1.0f,   1.4f,   1.8f,   2.0f,   2.0f,
+         13.0f,  16.2f,  19.4f,  21.0f,  21.0f,
+        144.0f, 179.6f, 215.2f, 233.0f, 233.0f
     }));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3160,6 +3201,28 @@ LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloa
     return result;
 }
 
+LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
+    const armnn::TensorShape inputShape{ 1, 1, 3, 2 };
+
+    // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
+    const armnn::TensorShape outputShape{ 1, 1, 3, 5 };
+
+    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
+    const armnn::TensorShape inputShape{ 1, 3, 2, 1 };
+
+    // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
+    const armnn::TensorShape outputShape{ 1, 3, 5, 1 };
+
+    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
 LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
 {
     constexpr unsigned int width = 2;
index 12bcdd8..71a468b 100644 (file)
@@ -243,6 +243,13 @@ LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloa
 // Tests the resize bilinear for magnification (output dimensions bigger than input dimensions).
 LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory);
 
+// Tests that execute Resize Bilinear with NHWC data layout
+LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+
 LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory);
 
 LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory);