IVGCVSW-3835 Create Encoder and Decoder for QSymm8PerAxis
authorKeith Davis <keith.davis@arm.com>
Mon, 4 Nov 2019 08:58:33 +0000 (08:58 +0000)
committerKeith Davis <keith.davis@arm.com>
Mon, 4 Nov 2019 16:46:35 +0000 (16:46 +0000)
 * Add QuantizedSymm8PerAxis to armnn DataType (types.hpp) and
 * Add Quantize and Dequantize templates for int8 in TypesUtils to be able to compute QSymm8 values for the weights
 * Create PerAxisIterator for per-axis quantization
 * Create QSymm8PerAxisDecoder
 * Create QSymm8PerAxisEncoder

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: Ibcfe0288a197b7ee50b543bdbd77b7edb8a547c2

include/armnn/Types.hpp
src/armnn/TypesUtils.cpp
src/armnnUtils/TensorUtils.hpp
src/backends/reference/workloads/BaseIterator.hpp
src/backends/reference/workloads/Decoders.hpp
src/backends/reference/workloads/Encoders.hpp

index 16a148c9c27e6afb544133a5c95f325746c06dc4..51162e6cf38dd659222fd66a2cd025c1adb4ec43 100644 (file)
@@ -31,7 +31,8 @@ enum class DataType
     QuantisedAsymm8 = 2,
     Signed32 = 3,
     Boolean = 4,
-    QuantisedSymm16 = 5
+    QuantisedSymm16 = 5,
+    QuantizedSymm8PerAxis = 6
 };
 
 enum class DataLayout
index cdc30da8ca65577e25c001051ac383eb536b5bdd..83c56c491c0a9e0daf1ecae1f62c4db5234f0150 100644 (file)
@@ -33,6 +33,10 @@ float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
     return dequantized;
 }
 
+/// Explicit specialization of Quantize for int8_t
+template
+int8_t armnn::Quantize<int8_t>(float value, float scale, int32_t offset);
+
 /// Explicit specialization of Quantize for uint8_t
 template
 uint8_t armnn::Quantize<uint8_t>(float value, float scale, int32_t offset);
@@ -45,6 +49,10 @@ int16_t armnn::Quantize<int16_t>(float value, float scale, int32_t offset);
 template
 int32_t armnn::Quantize<int32_t>(float value, float scale, int32_t offset);
 
+/// Explicit specialization of Dequantize for int8_t
+template
+float armnn::Dequantize<int8_t>(int8_t value, float scale, int32_t offset);
+
 /// Explicit specialization of Dequantize for uint8_t
 template
 float armnn::Dequantize<uint8_t>(uint8_t value, float scale, int32_t offset);
index 2b1f6a24f3e91d7b1c1254cd63da0e834434b958..32af179bdca76160e95e8dc9138bfbb152830388 100644 (file)
@@ -7,6 +7,8 @@
 
 #include <armnn/TypesUtils.hpp>
 
+#include <boost/assert.hpp>
+
 namespace armnnUtils
 {
 armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
@@ -32,4 +34,32 @@ unsigned int GetNumElementsBetween(const armnn::TensorShape& shape,
 
 unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis);
 
+inline unsigned int GetNumElementsAfter(const armnn::TensorShape& shape,
+                                        unsigned int axis)
+{
+    unsigned int numDim = shape.GetNumDimensions();
+    BOOST_ASSERT(0 >= axis);
+    BOOST_ASSERT(axis < numDim - 1);
+    unsigned int count = 1;
+    for (unsigned int i = axis; i < numDim; i++)
+    {
+        count *= shape[i];
+    }
+    return count;
+}
+
+inline std::pair<unsigned int, std::vector<float>> GetPerAxisParams(const armnn::TensorInfo& info)
+{
+    const std::vector<float>& scales = info.GetQuantizationScales();
+    armnn::Optional<unsigned int> quantizationDim = info.GetQuantizationDim();
+    if (scales.size() < 1 || !quantizationDim.has_value())
+    {
+        throw armnn::InvalidArgumentException(
+        "We currently support only per-axis symmetric quantization for QuantizedSymm8.");
+    }
+    unsigned int axisFactor = GetNumElementsAfter(info.GetShape(), quantizationDim.value());
+
+    return {axisFactor, scales};
+}
+
 } // namespace armnnUtils
index 18270faf469d70403410e1a11583aaeed8386b8c..9fe3f15f9b822c59d8d96bf859678fe83595ae65 100644 (file)
@@ -339,4 +339,116 @@ public:
     }
 };
 
-} //namespace armnn
+// PerAxisIterator for per-axis quantization
+template<typename T, typename Base>
+class PerAxisIterator : public Base
+{
+public:
+    // axisFactor is used to calculate axisIndex
+    PerAxisIterator(T* data = nullptr, unsigned int axisFactor = 0)
+        : m_Iterator(data), m_Start(data), m_AxisIndex(0), m_AxisFactor(axisFactor)
+    {}
+
+    // This should be called to set index for per-axis Encoder/Decoder
+    PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex)
+    {
+         BOOST_ASSERT(m_Iterator);
+         m_Iterator = m_Start + index;
+         m_AxisIndex = axisIndex;
+         return *this;
+    }
+
+    void Reset(void* data) override
+    {
+        m_Iterator = reinterpret_cast<T*>(data);
+        m_Start = m_Iterator;
+        m_AxisIndex = 0;
+    }
+
+    PerAxisIterator& operator++() override
+    {
+        BOOST_ASSERT(m_Iterator);
+        ++m_Iterator;
+        m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
+        return *this;
+    }
+
+    PerAxisIterator& operator+=(const unsigned int increment) override
+    {
+        BOOST_ASSERT(m_Iterator);
+        m_Iterator += increment;
+        m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
+        return *this;
+    }
+
+    PerAxisIterator& operator-=(const unsigned int decrement) override
+    {
+        BOOST_ASSERT(m_Iterator);
+        m_Iterator -= decrement;
+        m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
+        return *this;
+    }
+
+    PerAxisIterator& operator[](const unsigned int index) override
+    {
+        BOOST_ASSERT(m_Iterator);
+        m_Iterator = m_Start + index;
+        m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
+        return *this;
+    }
+
+    protected:
+        T* m_Iterator;
+        T* m_Start;
+        unsigned int m_AxisIndex;
+        unsigned int m_AxisFactor;
+};
+
+class QSymm8PerAxisDecoder : public PerAxisIterator<const int8_t, Decoder<float>>
+{
+public:
+    QSymm8PerAxisDecoder(const int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
+        : PerAxisIterator(data, axisFactor), m_Scale(scale) {}
+
+    float Get() const override
+    {
+        return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
+    }
+
+    // Get scale of the current value
+    float GetScale() const
+    {
+        return m_Scale[m_AxisIndex];
+    }
+
+private:
+    std::vector<float> m_Scale;
+};
+
+class QSymm8PerAxisEncoder : public PerAxisIterator<int8_t, Encoder<float>>
+{
+public:
+    QSymm8PerAxisEncoder(int8_t* data, const std::vector<float>& scale, unsigned int axisFactor)
+        : PerAxisIterator(data, axisFactor), m_Scale(scale) {}
+
+    void Set(float right)
+    {
+        *m_Iterator = armnn::Quantize<int8_t>(right, m_Scale[m_AxisIndex], 0);
+    }
+
+    float Get() const
+    {
+        return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
+    }
+
+    // Get scale of the current value
+    float GetScale() const
+    {
+        return m_Scale[m_AxisIndex];
+    }
+
+private:
+    std::vector<float> m_Scale;
+};
+
+} //namespace armnn
\ No newline at end of file
index 328a5eb0f7d4c8c2c2d0c945d593a9e416bf334c..dd2b28a50fd9c79cea572bb57edc719d3bc9da50 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "BaseIterator.hpp"
 #include "FloatingPointConverter.hpp"
+#include "TensorUtils.hpp"
 
 #include <boost/assert.hpp>
 
@@ -21,6 +22,14 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
 {
     switch(info.GetDataType())
     {
+        case armnn::DataType::QuantizedSymm8PerAxis:
+        {
+            std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
+            return std::make_unique<QSymm8PerAxisDecoder>(
+                static_cast<const int8_t*>(data),
+                params.second,
+                params.first);
+        }
         case DataType::QuantisedAsymm8:
         {
             return std::make_unique<QASymm8Decoder>(
@@ -55,7 +64,7 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Not supported Data Type!");
+            BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
             break;
         }
     }
index 2b3a11af063ef50fc16ccbcbff9d054650c04881..5c0cffa7ca8038f229ac4caeb51190665b64e62f 100644 (file)
@@ -6,6 +6,7 @@
 #pragma once
 
 #include "BaseIterator.hpp"
+#include "TensorUtils.hpp"
 
 #include <boost/assert.hpp>
 
@@ -20,6 +21,14 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
 {
     switch(info.GetDataType())
     {
+        case armnn::DataType::QuantizedSymm8PerAxis:
+        {
+            std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
+            return std::make_unique<QSymm8PerAxisEncoder>(
+                static_cast<int8_t*>(data),
+                params.second,
+                params.first);
+        }
         case armnn::DataType::QuantisedAsymm8:
         {
             return std::make_unique<QASymm8Encoder>(
@@ -48,7 +57,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Cannot encode from float. Not supported target Data Type!");
+            BOOST_ASSERT_MSG(false, "Unsupported target Data Type!");
             break;
         }
     }