IVGCVSW-2657: Fix to force correct quantisation parameters for QASYMM8 Softmax
author: David Monahan <david.monahan@arm.com>
Thu, 25 Apr 2019 15:03:38 +0000 (16:03 +0100)
committer: David Monahan <david.monahan@arm.com>
Mon, 29 Apr 2019 07:15:23 +0000 (07:15 +0000)
Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: I0989ea843714ba1d5da756bb87ddefa3706b07eb

include/armnn/INetwork.hpp
src/armnn/Network.cpp

index a15ceb1..7141770 100644 (file)
@@ -219,6 +219,8 @@ public:
         const char* name = nullptr) = 0;
 
     /// Adds a softmax layer to the network.
+    /// If the data type is QAsymm8, then the output quantization parameters
+    /// must have a scale of 1/256 and an offset of 0
     /// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
     /// @param name - Optional name for the layer.
     /// @return - Interface for configuring the layer.
index 0bd8d4b..a38bcf1 100644 (file)
@@ -114,8 +114,8 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
     bool noErrors = true;
     unsigned int numOutputs = layer->GetNumOutputSlots();
     for (unsigned int i = 0; i < numOutputs; i++) {
-        const OutputSlot &outputSlot = layer->GetOutputSlot(i);
-        const TensorInfo &info = outputSlot.GetTensorInfo();
+        OutputSlot& outputSlot = layer->GetOutputSlot(i);
+        TensorInfo info = outputSlot.GetTensorInfo();
         if (DataType::QuantisedAsymm8 == info.GetDataType()) {
             if (0.f == info.GetQuantizationScale()) {
                 noErrors = false;
@@ -125,6 +125,20 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
                    << " Quantized 8 bit but its scale parameter has not been set";
                 ReportError(ss.str(), errMessages);
             }
+            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
+            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
+                 info.GetQuantizationOffset() != 0) &&
+                 layer->GetType() == armnn::LayerType::Softmax)
+            {
+                std::stringstream ss;
+                ss << "Quantization parameters for Softmax layer (Scale: " <<
+                info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
+                ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
+                BOOST_LOG_TRIVIAL(warning) << ss.str();
+                info.SetQuantizationScale((1.0f /256.0f));
+                info.SetQuantizationOffset(0);
+                outputSlot.SetTensorInfo(info);
+            }
         }
     }
     return noErrors;