IVGCVSW-3213 Extend the Reference BatchNormalization workload to support the new QSymm16 type
author    Matteo Martincigh <matteo.martincigh@arm.com>
          Tue, 4 Jun 2019 09:59:47 +0000 (10:59 +0100)
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>
          Tue, 4 Jun 2019 15:20:45 +0000 (15:20 +0000)

 * Added QSymm16 to the range of supported types for batch
   normalization ref workloads
 * Added unit tests for QSymm16

Change-Id: I5b2fcfbd9cb5af149ebfe24e2d95f3affa2e3690
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
src/backends/backendsCommon/WorkloadData.cpp
src/backends/backendsCommon/test/LayerTests.cpp
src/backends/backendsCommon/test/LayerTests.hpp
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/test/RefCreateWorkloadTests.cpp
src/backends/reference/test/RefLayerTests.cpp

index a43619a..a373f55 100644 (file)
@@ -692,7 +692,8 @@ void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInf
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
     };
 
     ValidateDataTypes(input,  supportedTypes, "BatchNormalizationQueueDescriptor");
index 5679fff..e96f7dc 100644 (file)
@@ -8259,6 +8259,92 @@ LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
          1.f/20.f, 50, armnn::DataLayout::NHWC);
 }
 
+LayerTestResult<int16_t, 4> BatchNormInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{ // Exercises the reference BatchNormalization workload with QSymm16 data, NCHW layout (IVGCVSW-3213).
+    // Input/output tensor geometry (NCHW): BatchSize: 1
+    // Channels: 2
+    // Height: 3
+    // Width: 2
+
+    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 }; // { N, C, H, W }
+    std::vector<float> inputValues // de-quantized values; presumably quantized inside BatchNormTestImpl using the qScale/qOffset below — TODO confirm
+    {
+        // Batch 0, Channel 0, Height (3) x Width (2)
+         1.f, 4.f,
+         4.f, 2.f,
+         1.f, 6.f,
+
+        // Batch 0, Channel 1, Height (3) x Width (2)
+         1.f, 1.f,
+         4.f, 1.f,
+        -2.f, 4.f
+    };
+    std::vector<float> expectedOutputValues // expected post-normalization values (normalization parameters live inside BatchNormTestImpl — not visible here)
+    {
+        // Batch 0, Channel 0, Height (3) x Width (2)
+        1.f, 4.f,
+        4.f, 2.f,
+        1.f, 6.f,
+
+        // Batch 0, Channel 1, Height (3) x Width (2)
+        3.f, 3.f,
+        4.f, 3.f,
+        2.f, 4.f
+    };
+
+    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
+        workloadFactory, memoryManager,
+        inputOutputShape, inputValues, expectedOutputValues,
+        1.f/20.f, 50, armnn::DataLayout::NCHW); // qScale = 1/20, qOffset = 50 — NOTE(review): QSymm16 is a symmetric type; confirm a non-zero offset is intended
+}
+
+LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{ // NHWC counterpart of BatchNormInt16Test: same per-channel data, layout-permuted.
+    // Input/output tensor geometry (NHWC): BatchSize: 1
+    // Height: 3
+    // Width: 2
+    // Channels: 2
+
+    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 }; // { N, H, W, C }
+    std::vector<float> inputValues // de-quantized values; presumably quantized inside BatchNormTestImpl using the qScale/qOffset below — TODO confirm
+    {
+        // Batch 0, Height 0, Width (2) x Channel (2)
+        1.f,  1.f,
+        4.f,  1.f,
+
+        // Batch 0, Height 1, Width (2) x Channel (2)
+        4.f,  4.f,
+        2.f,  1.f,
+
+        // Batch 0, Height 2, Width (2) x Channel (2)
+        1.f, -2.f,
+        6.f,  4.f
+    };
+    std::vector<float> expectedOutputValues // expected post-normalization values (normalization parameters live inside BatchNormTestImpl — not visible here)
+    {
+        // Batch 0, Height 0, Width (2) x Channel (2)
+        1.f, 3.f,
+        4.f, 3.f,
+
+        // Batch 0, Height 1, Width (2) x Channel (2)
+        4.f, 4.f,
+        2.f, 3.f,
+
+        // Batch 0, Height 2, Width (2) x Channel (2)
+        1.f, 2.f,
+        6.f, 4.f
+    };
+
+    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
+        (workloadFactory, memoryManager,
+         inputOutputShape, inputValues, expectedOutputValues,
+         1.f/20.f, 50, armnn::DataLayout::NHWC); // qScale = 1/20, qOffset = 50 — NOTE(review): QSymm16 is a symmetric type; confirm a non-zero offset is intended
+}
+
 LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
index df79e46..be686c1 100644 (file)
@@ -1043,6 +1043,14 @@ LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+LayerTestResult<int16_t, 4> BatchNormInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -2000,4 +2008,4 @@ LayerTestResult<T, 4> SimpleFloorTest(
     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
 
     return ret;
-}
\ No newline at end of file
+}
index aeff51d..adc63e9 100644 (file)
@@ -288,10 +288,11 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
 {
     ignore_unused(descriptor);
 
-    std::array<DataType, 2> supportedTypes =
+    std::array<DataType, 3> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
     };
 
     bool supported = true;
index a0c6145..83e3f6c 100644 (file)
@@ -229,6 +229,18 @@ BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
             (DataLayout::NHWC);
 }
 
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
+{ // Verifies the ref factory creates a RefBatchNormalizationWorkload for QSymm16 tensors, NCHW layout.
+    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+            (DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
+{ // Same as above but with NHWC data layout.
+    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+            (DataLayout::NHWC);
+}
+
 BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
 {
     Graph                graph;
index 1620270..afeadb9 100644 (file)
@@ -357,6 +357,8 @@ ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest)
 ARMNN_AUTO_TEST_CASE(BatchNormNhwc, BatchNormNhwcTest)
 ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test)
 ARMNN_AUTO_TEST_CASE(BatchNormUint8Nhwc, BatchNormUint8NhwcTest)
+ARMNN_AUTO_TEST_CASE(BatchNormInt16, BatchNormInt16Test)
+ARMNN_AUTO_TEST_CASE(BatchNormInt16Nhwc, BatchNormInt16NhwcTest)
 
 // Resize Bilinear - NCHW
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)