IVGCVSW-4409 Fix TfLiteParser to handle all datatypes correctly
author: Ryan OShea <Ryan.OShea2@arm.com>
        Fri, 7 Feb 2020 17:22:22 +0000 (17:22 +0000)
committer: Ryan O'Shea <ryan.oshea2@arm.com>
        Mon, 10 Feb 2020 13:37:53 +0000 (13:37 +0000)
 * Added check for QAsymmS8 datatype for Per-Tensor quantization

Signed-off-by: Ryan OShea <Ryan.OShea2@arm.com>
Change-Id: I902aa3863dd5cefdce90e68be8a5750dd6ff9e54

src/armnnTfLiteParser/TfLiteParser.cpp

index 10bb0f6..560cdf1 100644 (file)
@@ -315,7 +315,14 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
             type = armnn::DataType::Float32;
             break;
         case tflite::TensorType_INT8:
-            type = armnn::DataType::QSymmS8;
+            if (tensorPtr->quantization->zero_point.size() == 1 && tensorPtr->quantization->zero_point[0] != 0)
+            {
+                type = armnn::DataType::QAsymmS8;
+            }
+            else
+            {
+                type = armnn::DataType::QSymmS8;
+            }
             break;
         case tflite::TensorType_INT16:
             type = armnn::DataType::QSymmS16;
@@ -359,7 +366,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
             if (tensorPtr->quantization->zero_point.size() == 1)
             {
                 // NOTE: we lose precision here when converting from 64 bit to 32
-                //       but this is what we support at the monent in ArmNN
+                //       but this is what we support at the moment in ArmNN
                 quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
             }