Add fp16 support for dequantize
author    Jan Eilers <jan.eilers@arm.com>
          Fri, 1 Nov 2019 11:09:36 +0000 (11:09 +0000)
committer Jan Eilers <jan.eilers@arm.com>
          Mon, 4 Nov 2019 12:09:08 +0000 (12:09 +0000)
* Changed RefDequantizeWorkload to use the Encoder/Decoder mechanism so the
  output can be written as Float32 or Float16
* Added related unit tests for the Cl, Neon and Ref backends
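
For context, a minimal sketch of the kind of graph this enables, built against the
public INetwork API (the layer wiring, names and shapes below are illustrative and
not part of this patch):

#include <armnn/INetwork.hpp>

// Illustrative only: Input (QAsymm8) -> Dequantize -> Output (Float16).
armnn::INetworkPtr BuildDequantizeToFp16Network()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::IConnectableLayer* input      = network->AddInputLayer(0);
    armnn::IConnectableLayer* dequantize = network->AddDequantizeLayer("dequantize");
    armnn::IConnectableLayer* output     = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(dequantize->GetInputSlot(0));
    dequantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Quantized input (scale 0.5, offset 0) is dequantized straight to Float16.
    input->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({1, 2, 2, 3}, armnn::DataType::QuantisedAsymm8, 0.5f, 0));
    dequantize->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({1, 2, 2, 3}, armnn::DataType::Float16));

    return network;
}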

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ic2fd4103090dd2127c6859b49305736f7b2dfb05

12 files changed:
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp
src/backends/cl/test/ClLayerTests.cpp
src/backends/neon/test/NeonLayerTests.cpp
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/backend.mk
src/backends/reference/test/RefLayerTests.cpp
src/backends/reference/workloads/CMakeLists.txt
src/backends/reference/workloads/Dequantize.cpp [new file with mode: 0644]
src/backends/reference/workloads/Dequantize.hpp [new file with mode: 0644]
src/backends/reference/workloads/RefDequantizeWorkload.cpp

index b4b4ffc..4a7f007 100644 (file)
@@ -266,7 +266,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
 
             result = layerSupportObject->IsDequantizeSupported(OverrideDataType(input, dataType),
-                                                               OverrideDataType(output, DataType::Float32),
+                                                               output,
                                                                reason);
             break;
         }
index 42673d5..6a3e852 100644 (file)
 namespace
 {
 
-template<typename T, std::size_t Dim>
-LayerTestResult<float, Dim> DequantizeTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& inputTensorInfo,
-    const armnn::TensorInfo& outputTensorInfo,
-    const std::vector<T>& inputData,
-    const std::vector<float>& expectedOutputData,
-    armnn::DequantizeQueueDescriptor descriptor)
+template<typename T, std::size_t Dim, typename T1=float>
+LayerTestResult<T1, Dim> DequantizeTestImpl(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::TensorInfo& inputTensorInfo,
+        const armnn::TensorInfo& outputTensorInfo,
+        const std::vector<T>& inputData,
+        const std::vector<T1>& expectedOutputData,
+        armnn::DequantizeQueueDescriptor descriptor)
 {
     boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
 
-    LayerTestResult<float, Dim> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<float, Dim>(outputTensorInfo, expectedOutputData);
+    LayerTestResult<T1, Dim> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T1, Dim>(outputTensorInfo, expectedOutputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -53,8 +53,10 @@ LayerTestResult<float, Dim> DequantizeTestImpl(
     return ret;
 }
 
-template <armnn::DataType ArmnnInputType>
-LayerTestResult<float, 4> DequantizeSimpleTest(
+template <armnn::DataType ArmnnInputType,
+          armnn::DataType ArmnnOutputType=armnn::DataType::Float32,
+          typename OutType=armnn::ResolveType<ArmnnOutputType>>
+LayerTestResult<OutType, 4> DequantizeSimpleTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
@@ -63,7 +65,7 @@ LayerTestResult<float, 4> DequantizeSimpleTest(
     armnn::DequantizeQueueDescriptor desc;
 
     const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, ArmnnInputType, 0.5f, 0);
-    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType);
 
     std::vector<T> inputData = std::vector<T>(
     {
@@ -73,21 +75,19 @@ LayerTestResult<float, 4> DequantizeSimpleTest(
         20, 22, 24,
     });
 
-    std::vector<float> expectedOutputData = std::vector<float>(
+    std::vector<OutType> expectedOutputData;
+    for (OutType i = OutType(1); i <= OutType(12); ++i)
     {
-        1.0f,   2.0f,  3.0f,
-        4.0f,   5.0f,  6.0f,
-        7.0f,   8.0f,  9.0f,
-        10.0f, 11.0f, 12.0f,
-    });
-
-    return DequantizeTestImpl<T, 4>(workloadFactory,
-                                    memoryManager,
-                                    inputTensorInfo,
-                                    outputTensorInfo,
-                                    inputData,
-                                    expectedOutputData,
-                                    desc);
+        expectedOutputData.push_back(i);
+    }
+
+    return DequantizeTestImpl<T, 4, OutType>(workloadFactory,
+                                             memoryManager,
+                                             inputTensorInfo,
+                                             outputTensorInfo,
+                                             inputData,
+                                             expectedOutputData,
+                                             desc);
 }
 
 template <armnn::DataType ArmnnInputType>
@@ -149,3 +149,19 @@ LayerTestResult<float, 4> DequantizeSimpleInt16Test(
 {
     return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
 }
+
+LayerTestResult<armnn::Half, 4> DequantizeSimpleUint8ToFp16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float16>(workloadFactory,
+                                                                                            memoryManager);
+}
+
+LayerTestResult<armnn::Half, 4> DequantizeSimpleInt16ToFp16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Float16>(workloadFactory,
+                                                                                            memoryManager);
+}
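
For reference, the expected values in these tests follow the usual affine
dequantization, real = scale * (quantized - offset); with scale 0.5 and offset 0
the inputs {2, 4, ..., 24} map to {1, 2, ..., 12} whether the output type is
Float32 or Float16. A minimal sketch (the helper name is illustrative, not part
of the patch):

#include <cstdint>

// Illustrative helper: the affine dequantization the test data above assumes.
float DequantizeValue(int32_t quantized, float scale, int32_t offset)
{
    return scale * static_cast<float>(quantized - offset);
}
// e.g. DequantizeValue(24, 0.5f, 0) == 12.0f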
index 55ea4b4..4d83780 100644 (file)
@@ -10,6 +10,8 @@
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
+#include <Half.hpp>
+
 LayerTestResult<float, 4> DequantizeSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -21,3 +23,11 @@ LayerTestResult<float, 4> DequantizeOffsetUint8Test(
 LayerTestResult<float, 4> DequantizeSimpleInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<armnn::Half, 4> DequantizeSimpleUint8ToFp16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<armnn::Half, 4> DequantizeSimpleInt16ToFp16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
index 4e987db..909ebc7 100644 (file)
@@ -710,11 +710,11 @@ ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
 
 // Dequantize
-// NOTE: current clframework (46a49a0a8206f0efa7afd514940e180a88ffd732)
-//       CLDequantizationLayerKernel accepts DataType::QASYMM8 input
-//       and can output DataType::F16 or DataType::F32
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test)
 
 // TransposeConvolution2d
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
index 50a2d1d..046ca2a 100644 (file)
@@ -325,8 +325,10 @@ BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils)
 }
 
 // Dequantize
+// Fp16 is only supported if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is enabled
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
 
 // Pooling
 ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true)
index 5972158..716e8d9 100644 (file)
@@ -587,8 +587,9 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
     supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                   "Reference dequantize: input type not supported.");
 
-    std::array<DataType,1> supportedOutputTypes = {
-        DataType::Float32
+    std::array<DataType,2> supportedOutputTypes = {
+        DataType::Float32,
+        DataType::Float16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
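
With this change the reference backend reports Float16 outputs as supported for
Dequantize. A minimal sketch of how that could be queried (assuming the
RefLayerSupport interface shown above; include path and names are illustrative):

#include <armnn/Tensor.hpp>
#include <reference/RefLayerSupport.hpp>

#include <string>

bool IsUint8ToFp16DequantizeSupported()
{
    armnn::RefLayerSupport layerSupport;

    armnn::TensorInfo input({1, 2, 2, 3}, armnn::DataType::QuantisedAsymm8, 0.5f, 0);
    armnn::TensorInfo output({1, 2, 2, 3}, armnn::DataType::Float16);

    std::string reason;
    // Before this patch only DataType::Float32 outputs passed this check.
    return layerSupport.IsDequantizeSupported(input, output, reason);
}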
index 7e97acd..5f9af59 100644 (file)
@@ -31,6 +31,7 @@ BACKEND_SOURCES := \
         workloads/Debug.cpp \
         workloads/DepthToSpace.cpp \
         workloads/DetectionPostProcess.cpp \
+        workloads/Dequantize.cpp \
         workloads/ElementwiseFunction.cpp \
         workloads/FullyConnected.cpp \
         workloads/Gather.cpp \
index 1b284c3..7f28038 100644 (file)
@@ -1378,6 +1378,8 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test)
 
 // Quantize
 ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
index 7844518..29abfed 100644 (file)
@@ -28,6 +28,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     DepthToSpace.hpp
     DetectionPostProcess.cpp
     DetectionPostProcess.hpp
+    Dequantize.cpp
+    Dequantize.hpp
     ElementwiseFunction.cpp
     ElementwiseFunction.hpp
     Encoders.hpp
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
new file mode 100644 (file)
index 0000000..fafc03e
--- /dev/null
@@ -0,0 +1,29 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Dequantize.hpp"
+
+namespace armnn
+{
+
+void Dequantize(Decoder<float>& inputDecoder,
+                Encoder<float>& outputEncoder,
+                const TensorInfo& inputInfo,
+                const TensorInfo& outputInfo)
+{
+    BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+    for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
+    {
+        // inputDecoder.Get() dequantizes the data element from whatever
+        // type is given by inputInfo to fp32 (if MakeDecoder supports that dequantization).
+        // outputEncoder.Set() converts the data element to whatever type is
+        // given by outputInfo (if MakeEncoder supports that conversion).
+        outputEncoder.Set(inputDecoder.Get());
+        ++outputEncoder;
+        ++inputDecoder;
+    }
+}
+
+} // armnn namespace
\ No newline at end of file
diff --git a/src/backends/reference/workloads/Dequantize.hpp b/src/backends/reference/workloads/Dequantize.hpp
new file mode 100644 (file)
index 0000000..c01b454
--- /dev/null
@@ -0,0 +1,20 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+
+namespace armnn
+{
+
+void Dequantize(Decoder<float>& inputDecoder,
+                Encoder<float>& outputEncoder,
+                const TensorInfo& inputInfo,
+                const TensorInfo& outputInfo);
+
+} //namespace armnn
index d861c50..e6f5c6b 100644 (file)
@@ -5,6 +5,9 @@
 
 #include "RefDequantizeWorkload.hpp"
 #include "RefWorkloadUtils.hpp"
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+#include "Dequantize.hpp"
 
 namespace armnn
 {
@@ -14,21 +17,12 @@ void RefDequantizeWorkload::Execute() const
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-    const DataType& inputDataType = inputInfo.GetDataType();
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
 
-    float* outputData = GetOutputTensorData<float>(0, m_Data);
+    auto inputDecoder  = MakeDecoder<float>(inputInfo,  m_Data.m_Inputs[0]->Map());
+    auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
 
-    switch (inputDataType)
-    {
-        case DataType::QuantisedAsymm8:
-            Dequantize<uint8_t>(GetInputTensorData<uint8_t>(0, m_Data), outputData, inputInfo);
-            break;
-        case DataType::QuantisedSymm16:
-            Dequantize<int16_t>(GetInputTensorData<int16_t>(0, m_Data), outputData, inputInfo);
-            break;
-        default:
-            throw InvalidArgumentException("RefDequantizeWorkload: Unsupported input data type");
-    }
+    Dequantize(*inputDecoder, *outputEncoder, inputInfo, outputInfo);
 }
 
 } // namespace armnn
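
Taken together, the new workload body is equivalent to the following standalone
use of the Encoder/Decoder helpers; a minimal sketch reusing the MakeDecoder,
MakeEncoder and Dequantize calls shown above (buffer contents and shapes are
illustrative):

#include <Half.hpp>
#include "Decoders.hpp"
#include "Encoders.hpp"
#include "Dequantize.hpp"

#include <cstdint>
#include <vector>

void DequantizeUint8ToFp16Example()
{
    // QAsymm8 data with scale 0.5 and offset 0, dequantized directly to Float16.
    std::vector<uint8_t>     quantized   = { 2, 4, 6 };
    std::vector<armnn::Half> dequantized(quantized.size());

    armnn::TensorInfo inputInfo({1, 3}, armnn::DataType::QuantisedAsymm8, 0.5f, 0);
    armnn::TensorInfo outputInfo({1, 3}, armnn::DataType::Float16);

    auto decoder = armnn::MakeDecoder<float>(inputInfo,  quantized.data());
    auto encoder = armnn::MakeEncoder<float>(outputInfo, dequantized.data());

    // Equivalent to what RefDequantizeWorkload::Execute() now does per tensor.
    armnn::Dequantize(*decoder, *encoder, inputInfo, outputInfo);
    // dequantized now holds { 1.0, 2.0, 3.0 } as armnn::Half values.
}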