Rename quantized data types to remove ambiguity between signed and unsigned payloads
author Derek Lamberti <derek.lamberti@arm.com>
Fri, 10 Jan 2020 17:14:08 +0000 (17:14 +0000)
committer Kevin May <kevin.may@arm.com>
Mon, 13 Jan 2020 18:18:12 +0000 (18:18 +0000)
!android-nn-driver:2572

Change-Id: I8fe52ceb09987b3d05c539409510f535165455cc
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
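
The old names are kept as deprecated aliases of the new enumerators (see the Types.hpp hunk below), so existing code continues to compile, with a warning, and the enumerator values are unchanged. A minimal user-side migration sketch, not part of this patch, assuming the TensorInfo(shape, dataType, scale, offset) constructor used elsewhere in this diff:

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

int main()
{
    // New, unambiguous names encode signedness and width explicitly.
    armnn::TensorInfo u8Info({1, 3, 224, 224}, armnn::DataType::QAsymmU8, 0.5f, 128);
    armnn::TensorInfo s16Info({1, 16}, armnn::DataType::QSymmS16, 0.25f, 0);

    // The old name still compiles but emits the deprecation warning
    // "Use DataType::QAsymmU8 instead."; behaviour is identical.
    armnn::TensorInfo legacy({1, 16}, armnn::DataType::QuantisedAsymm8, 0.5f, 128);

    // Aliases share the enumerator value, so serialized models keep their meaning.
    static_assert(armnn::DataType::QuantisedAsymm8 == armnn::DataType::QAsymmU8,
                  "deprecated alias must keep the old value");

    (void)u8Info; (void)s16Info; (void)legacy;
    return 0;
}
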
143 files changed:
include/armnn/Types.hpp
include/armnn/TypesUtils.hpp
include/armnnQuantizer/INetworkQuantizer.hpp
src/armnn/CompatibleTypes.hpp
src/armnn/LayerSupportCommon.hpp
src/armnn/Network.cpp
src/armnn/NetworkQuantizationScheme.hpp
src/armnn/NetworkQuantizer.cpp
src/armnn/NetworkQuantizerUtils.cpp
src/armnn/ResolveType.hpp
src/armnn/Tensor.cpp
src/armnn/test/ConstTensorLayerVisitor.cpp
src/armnn/test/CreateWorkload.hpp
src/armnn/test/NetworkTests.cpp
src/armnn/test/OptimizerTests.cpp
src/armnn/test/QuantizerTest.cpp
src/armnn/test/RuntimeTests.cpp
src/armnn/test/TensorTest.cpp
src/armnn/test/UtilsTests.cpp
src/armnnDeserializer/Deserializer.cpp
src/armnnDeserializer/test/DeserializeActivation.cpp
src/armnnDeserializer/test/DeserializeAdd.cpp
src/armnnDeserializer/test/DeserializeComparison.cpp
src/armnnDeserializer/test/DeserializeConstant.cpp
src/armnnDeserializer/test/DeserializeDivision.cpp
src/armnnDeserializer/test/DeserializeFullyConnected.cpp
src/armnnDeserializer/test/DeserializeMultiplication.cpp
src/armnnDeserializer/test/DeserializePad.cpp
src/armnnDeserializer/test/DeserializePermute.cpp
src/armnnDeserializer/test/DeserializePooling2d.cpp
src/armnnDeserializer/test/DeserializeReshape.cpp
src/armnnDeserializer/test/DeserializeSubtraction.cpp
src/armnnQuantizer/ArmNNQuantizerMain.cpp
src/armnnSerializer/ArmnnSchema.fbs
src/armnnSerializer/Serializer.cpp
src/armnnSerializer/SerializerUtils.cpp
src/armnnSerializer/test/SerializerTests.cpp
src/armnnTfLiteParser/TfLiteParser.cpp
src/armnnTfLiteParser/test/Addition.cpp
src/armnnTfLiteParser/test/AvgPool2D.cpp
src/armnnTfLiteParser/test/Concatenation.cpp
src/armnnTfLiteParser/test/Constant.cpp
src/armnnTfLiteParser/test/Conv2D.cpp
src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
src/armnnTfLiteParser/test/Dequantize.cpp
src/armnnTfLiteParser/test/DetectionPostProcess.cpp
src/armnnTfLiteParser/test/FullyConnected.cpp
src/armnnTfLiteParser/test/MaxPool2D.cpp
src/armnnTfLiteParser/test/Reshape.cpp
src/armnnTfLiteParser/test/Softmax.cpp
src/armnnTfLiteParser/test/Split.cpp
src/armnnTfLiteParser/test/Squeeze.cpp
src/armnnTfLiteParser/test/Sub.cpp
src/armnnTfLiteParser/test/TransposeConv.cpp
src/armnnTfLiteParser/test/Unpack.cpp
src/armnnTfLiteParser/test/Unsupported.cpp
src/backends/aclCommon/ArmComputeTensorUtils.cpp
src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
src/backends/backendsCommon/LayerSupportRules.hpp
src/backends/backendsCommon/MakeWorkloadHelper.hpp
src/backends/backendsCommon/Workload.hpp
src/backends/backendsCommon/WorkloadData.cpp
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/backendsCommon/WorkloadUtils.cpp
src/backends/backendsCommon/test/EndToEndTestImpl.hpp
src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
src/backends/backendsCommon/test/WorkloadDataValidation.cpp
src/backends/backendsCommon/test/WorkloadTestUtils.hpp
src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
src/backends/cl/ClLayerSupport.cpp
src/backends/cl/test/ClCreateWorkloadTests.cpp
src/backends/cl/test/ClEndToEndTests.cpp
src/backends/cl/test/ClLayerSupportTests.cpp
src/backends/cl/test/ClLayerTests.cpp
src/backends/cl/workloads/ClGreaterWorkload.cpp
src/backends/cl/workloads/ClGreaterWorkload.hpp
src/backends/cl/workloads/ClWorkloadUtils.hpp
src/backends/neon/test/NeonCreateWorkloadTests.cpp
src/backends/neon/test/NeonEndToEndTests.cpp
src/backends/neon/test/NeonLayerSupportTests.cpp
src/backends/neon/test/NeonLayerTests.cpp
src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
src/backends/neon/workloads/NeonGreaterWorkload.cpp
src/backends/neon/workloads/NeonGreaterWorkload.hpp
src/backends/neon/workloads/NeonWorkloadUtils.hpp
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefWorkloadFactory.cpp
src/backends/reference/test/RefCreateWorkloadTests.cpp
src/backends/reference/test/RefEndToEndTests.cpp
src/backends/reference/test/RefLayerSupportTests.cpp
src/backends/reference/test/RefLayerTests.cpp
src/backends/reference/workloads/Decoders.hpp
src/backends/reference/workloads/Encoders.hpp
src/backends/reference/workloads/RefDebugWorkload.cpp
src/backends/reference/workloads/RefDebugWorkload.hpp
src/backends/reference/workloads/RefPadWorkload.cpp
src/backends/reference/workloads/RefPadWorkload.hpp
src/backends/reference/workloads/RefPermuteWorkload.cpp
src/backends/reference/workloads/RefPermuteWorkload.hpp
src/backends/reference/workloads/RefQuantizeWorkload.cpp
tests/DeepSpeechV1Database.hpp
tests/ImageTensorGenerator/ImageTensorGenerator.cpp
tests/ImageTensorGenerator/ImageTensorGenerator.hpp
tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp

diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp
index dcc8c9e52c31b5799c56a2efea5da4f30a25ec7d..e5a7fc0987195f29529d29fe148645f665715222 100644 (file)
@@ -10,6 +10,7 @@
 #include <stdint.h>
 #include "BackendId.hpp"
 #include "Exceptions.hpp"
+#include "Deprecated.hpp"
 
 namespace armnn
 {
@@ -32,12 +33,15 @@ enum class DataType
 {
     Float16 = 0,
     Float32 = 1,
-    QuantisedAsymm8 = 2,
+    QAsymmU8 = 2,
     Signed32 = 3,
     Boolean = 4,
-    QuantisedSymm16 = 5,
+    QSymmS16 = 5,
     QuantizedSymm8PerAxis = 6,
-    QSymmS8 = 7
+    QSymmS8 = 7,
+
+    QuantisedAsymm8 ARMNN_DEPRECATED_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
+    QuantisedSymm16 ARMNN_DEPRECATED_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
 };
 
 enum class DataLayout
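
ARMNN_DEPRECATED_MSG comes from the newly included Deprecated.hpp and presumably wraps the standard [[deprecated("...")]] attribute; a standalone sketch of the enumerator-alias pattern (valid C++17, independent of the Arm NN headers):

// Old names alias the new values: switch statements and serialized
// integers are unaffected, and any use merely emits a warning.
enum class DataType
{
    QAsymmU8 = 2,
    QSymmS16 = 5,

    QuantisedAsymm8 [[deprecated("Use DataType::QAsymmU8 instead.")]] = QAsymmU8,
    QuantisedSymm16 [[deprecated("Use DataType::QSymmS16 instead.")]] = QSymmS16
};
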
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index 4394d9ddd95afee427b981a76790de9e6542d311..065b6839fc13ff3006434a58db578ba7efc6c701 100644 (file)
@@ -115,10 +115,10 @@ constexpr unsigned int GetDataTypeSize(DataType dataType)
         case DataType::Float16:               return 2U;
         case DataType::Float32:
         case DataType::Signed32:              return 4U;
-        case DataType::QuantisedAsymm8:       return 1U;
+        case DataType::QAsymmU8:              return 1U;
         case DataType::QSymmS8:               return 1U;
         case DataType::QuantizedSymm8PerAxis: return 1U;
-        case DataType::QuantisedSymm16:       return 2U;
+        case DataType::QSymmS16:              return 2U;
         case DataType::Boolean:               return 1U;
         default:                              return 0U;
     }
@@ -163,10 +163,10 @@ constexpr const char* GetDataTypeName(DataType dataType)
     {
         case DataType::Float16:               return "Float16";
         case DataType::Float32:               return "Float32";
-        case DataType::QuantisedAsymm8:       return "QAsymm8";
-        case DataType::QSymmS8:               return "QSymm8";
+        case DataType::QAsymmU8:              return "QAsymmU8";
+        case DataType::QSymmS8:               return "QSymmS8";
         case DataType::QuantizedSymm8PerAxis: return "QSymm8PerAxis";
-        case DataType::QuantisedSymm16:       return "QSymm16";
+        case DataType::QSymmS16:       return "QSymm16";
         case DataType::Signed32:              return "Signed32";
         case DataType::Boolean:               return "Boolean";
 
@@ -199,9 +199,9 @@ constexpr bool IsQuantizedType()
 
 constexpr bool IsQuantizedType(DataType dataType)
 {
-    return dataType == DataType::QuantisedAsymm8 ||
+    return dataType == DataType::QAsymmU8        ||
            dataType == DataType::QSymmS8         ||
-           dataType == DataType::QuantisedSymm16 ||
+           dataType == DataType::QSymmS16        ||
            dataType == DataType::QuantizedSymm8PerAxis;
 }
 
diff --git a/include/armnnQuantizer/INetworkQuantizer.hpp b/include/armnnQuantizer/INetworkQuantizer.hpp
index 826b077f6e28577e4b06c0014fdee521af3e5e83..5fc5763216a4ebc171df9fa27a8944e67d3d59aa 100644 (file)
@@ -14,7 +14,7 @@ namespace armnn
 
 struct QuantizerOptions
 {
-    QuantizerOptions() : QuantizerOptions(DataType::QuantisedAsymm8, false) {}
+    QuantizerOptions() : QuantizerOptions(DataType::QAsymmU8, false) {}
 
     QuantizerOptions(DataType activationFormat) : QuantizerOptions(activationFormat, false) {}
 
diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
index cc545a96429174703aa582007a2764defbfffa36..bca092ca0c12aa0a8701e9c37ef3a86698d322df 100644 (file)
@@ -32,7 +32,7 @@ inline bool CompatibleTypes<Half>(DataType dataType)
 template<>
 inline bool CompatibleTypes<uint8_t>(DataType dataType)
 {
-    return dataType == DataType::Boolean || dataType == DataType::QuantisedAsymm8;
+    return dataType == DataType::Boolean || dataType == DataType::QAsymmU8;
 }
 
 template<>
@@ -44,7 +44,7 @@ inline bool CompatibleTypes<int8_t>(DataType dataType)
 template<>
 inline bool CompatibleTypes<int16_t>(DataType dataType)
 {
-    return dataType == DataType::QuantisedSymm16;
+    return dataType == DataType::QSymmS16;
 }
 
 template<>
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index 557e72a323ad766b4cacd07ac2a6852be3147863..e0c6b8040c20ea93257077c4703b669424fbf732 100644 (file)
@@ -40,7 +40,7 @@ bool IsSupportedForDataTypeGeneric(Optional<std::string&> reasonIfUnsupported,
             return float16FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
         case DataType::Float32:
             return float32FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
-        case DataType::QuantisedAsymm8:
+        case DataType::QAsymmU8:
             return uint8FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
         case DataType::Signed32:
             return int32FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 1406160914ed08bc36fbf8543d7688923aa04906..554e2e26a5187dc500412077f6b2824e1ec20429 100644 (file)
@@ -101,7 +101,7 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
     for (unsigned int i = 0; i < numOutputs; i++) {
         OutputSlot& outputSlot = layer->GetOutputSlot(i);
         TensorInfo info = outputSlot.GetTensorInfo();
-        if (DataType::QuantisedAsymm8 == info.GetDataType()) {
+        if (DataType::QAsymmU8 == info.GetDataType()) {
             if (0.f == info.GetQuantizationScale()) {
                 noErrors = false;
                 std::stringstream ss;
diff --git a/src/armnn/NetworkQuantizationScheme.hpp b/src/armnn/NetworkQuantizationScheme.hpp
index ea3c29102b4bd5d088e4e1fcd96141ea69f15df1..a5b7542748b8cfc42f4ca6bf67cfc81079c7bb8b 100644 (file)
@@ -58,7 +58,7 @@ struct QAsymm8QuantizationScheme : IQuantizationScheme
 
     int NumBits() const override { return 8; }
 
-    DataType GetDataType() const override { return DataType::QuantisedAsymm8; }
+    DataType GetDataType() const override { return DataType::QAsymmU8; }
 };
 
 struct QSymmS8QuantizationScheme : IQuantizationScheme
@@ -119,7 +119,7 @@ struct QSymm16QuantizationScheme : IQuantizationScheme
 
     int NumBits() const override { return 16; }
 
-    DataType GetDataType() const override { return DataType::QuantisedSymm16; }
+    DataType GetDataType() const override { return DataType::QSymmS16; }
 };
 
 } // namespace armnn
diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp
index f6d625fda3fe374bf3ec19c38b8d625e5825c073..5e00de5fcb41b85d98a6403a80e366bb6c823911 100644 (file)
@@ -160,13 +160,13 @@ INetworkPtr NetworkQuantizer::ExportNetwork()
     std::unique_ptr<IQuantizationScheme> quantizationScheme;
     switch (m_Options.m_ActivationFormat)
     {
-        case DataType::QuantisedAsymm8:
+        case DataType::QAsymmU8:
             quantizationScheme = std::make_unique<QAsymm8QuantizationScheme>();
             break;
         case DataType::QSymmS8:
             quantizationScheme = std::make_unique<QSymmS8QuantizationScheme>();
             break;
-        case DataType::QuantisedSymm16:
+        case DataType::QSymmS16:
             quantizationScheme = std::make_unique<QSymm16QuantizationScheme>();
             break;
         default:
diff --git a/src/armnn/NetworkQuantizerUtils.cpp b/src/armnn/NetworkQuantizerUtils.cpp
index a6f9ebdc42385763bafc124e042a42f9a9fa4644..75473b4ae628ab5d4f1365ee3007ba6cea115c7b 100644 (file)
@@ -36,7 +36,7 @@ ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>
             BOOST_ASSERT_MSG(false, "Can't quantize unsupported data type");
     }
 
-    TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QuantisedAsymm8, scale, offset);
+    TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QAsymmU8, scale, offset);
     return ConstTensor(qInfo, backing);
 }
 
diff --git a/src/armnn/ResolveType.hpp b/src/armnn/ResolveType.hpp
index ba3d0fca5c612082859c026a2d5893240a04937b..c7a244dba857a4f7bc73e46c11a4e78d195e4a34 100644 (file)
@@ -27,7 +27,7 @@ struct ResolveTypeImpl<DataType::Float32>
 };
 
 template<>
-struct ResolveTypeImpl<DataType::QuantisedAsymm8>
+struct ResolveTypeImpl<DataType::QAsymmU8>
 {
     using Type = uint8_t;
 };
@@ -39,7 +39,7 @@ struct ResolveTypeImpl<DataType::QSymmS8>
 };
 
 template<>
-struct ResolveTypeImpl<DataType::QuantisedSymm16>
+struct ResolveTypeImpl<DataType::QSymmS16>
 {
     using Type = int16_t;
 };
diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp
index 171e02ad1328185335e2e95fe180734a89b7bfb6..8eebc43cb54ce65dfb205f9c586c5b56092e394d 100644 (file)
@@ -289,7 +289,7 @@ void TensorInfo::SetQuantizationDim(const Optional<unsigned int>& quantizationDi
 
 bool TensorInfo::IsQuantized() const
 {
-    return m_DataType == DataType::QuantisedAsymm8 || m_DataType == DataType::QuantisedSymm16;
+    return m_DataType == DataType::QAsymmU8 || m_DataType == DataType::QSymmS16;
 }
 
 // ---
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index cfcdb1d2ff8bd5878f2cd880797274c4805d772d..ada665e4e991f98797f9d24ffee93838276e671d 100644 (file)
@@ -1248,43 +1248,43 @@ BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer)
     std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToInputWeights(
-            TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData);
+            TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
 
     std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToForgetWeights(
-            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData);
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
 
     std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToCellWeights(
-            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData);
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
 
     std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToOutputWeights(
-            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData);
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
 
 
     std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToInputWeights(TensorInfo(
-            4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData);
+            4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
 
     std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToForgetWeights(TensorInfo(
-            4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData);
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
 
     std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToCellWeights(TensorInfo(
-            4, recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData);
+            4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
 
     std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToOutputWeights(TensorInfo(
-            4, recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData);
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
 
 
     std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -1338,43 +1338,43 @@ BOOST_AUTO_TEST_CASE(CheckNamedQuantizedLstmLayer)
     std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToInputWeights(
-            TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData);
+            TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
 
     std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToForgetWeights(
-            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData);
+            TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
 
     std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToCellWeights(
-            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData);
+            TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
 
     std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor inputToOutputWeights(
-            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData);
+            TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
 
 
     std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToInputWeights(TensorInfo(
-            4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData);
+            4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
 
     std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToForgetWeights(TensorInfo(
-            4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData);
+            4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
 
     std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToCellWeights(TensorInfo(
-            4, recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData);
+            4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
 
     std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
     ConstTensor recurrentToOutputWeights(TensorInfo(
-            4, recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData);
+            4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
 
 
     std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index f6928f858f9ba70777cafc3845b5a34a86056722..02ce12a30464f22b14ac6c85aba6df66eeb3753d 100644 (file)
@@ -399,12 +399,12 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
 
     // Weights and bias tensor and quantization info
     armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
-                                       armnn::DataType::QuantisedAsymm8,
+                                       armnn::DataType::QAsymmU8,
                                        weightsScale,
                                        weightsOffset);
 
     armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
-                                           armnn::DataType::QuantisedAsymm8,
+                                           armnn::DataType::QAsymmU8,
                                            weightsScale,
                                            weightsOffset);
 
@@ -463,17 +463,17 @@ std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IW
 
     // Input/output tensor info and quantization info
     armnn::TensorInfo inputInfo({numBatches , inputSize},
-                                armnn::DataType::QuantisedAsymm8,
+                                armnn::DataType::QAsymmU8,
                                 inputOutputScale,
                                 inputOutputOffset);
 
     armnn::TensorInfo cellStateInfo({numBatches , outputSize},
-                                    armnn::DataType::QuantisedSymm16,
+                                    armnn::DataType::QSymmS16,
                                     cellStateScale,
                                     cellStateOffset);
 
     armnn::TensorInfo outputStateInfo({numBatches , outputSize},
-                                      armnn::DataType::QuantisedAsymm8,
+                                      armnn::DataType::QAsymmU8,
                                       inputOutputScale,
                                       inputOutputOffset);
 
@@ -530,8 +530,8 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
 
     Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
 
-    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
-    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+    float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
 
     layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
     layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>
@@ -637,8 +637,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
 
     FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
 
-    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
-    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+    float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
 
     layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
     layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
@@ -1361,7 +1361,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
 
     if (biasEnabled)
     {
-        constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QuantisedAsymm8) ?
+        constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ?
             armnn::DataType::Signed32 : armnn::DataType::Float32;
 
         TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0);
@@ -1396,14 +1396,14 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
 
     // set the tensors in the network (NHWC format)
     TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType);
-    if (dataType == armnn::DataType::QuantisedAsymm8)
+    if (dataType == armnn::DataType::QAsymmU8)
     {
         inputTensorInfo.SetQuantizationOffset(0);
         inputTensorInfo.SetQuantizationScale(0.9f);
     }
 
     TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType);
-    if (dataType == armnn::DataType::QuantisedAsymm8)
+    if (dataType == armnn::DataType::QAsymmU8)
     {
         outputTensorInfo.SetQuantizationOffset(0);
         outputTensorInfo.SetQuantizationScale(0.9f);
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index d8b4e17a3c13baefbbcb473a82c4e728df99d004..5d1313f61fd4d6c6e0614defcc000eb174ab841b 100644 (file)
@@ -392,7 +392,7 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize)
             BOOST_TEST((infoIn.GetDataType() == armnn::DataType::Float32));
 
             const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
-            BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QuantisedAsymm8));
+            BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
         }
 
         bool m_Visited = false;
@@ -411,7 +411,7 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize)
     armnn::TensorInfo infoIn({3,1}, armnn::DataType::Float32);
     input->GetOutputSlot(0).SetTensorInfo(infoIn);
 
-    armnn::TensorInfo infoOut({3,1}, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo infoOut({3,1}, armnn::DataType::QAsymmU8);
     quantize->GetOutputSlot(0).SetTensorInfo(infoOut);
 
     Test testQuantize;
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index ba18aa31e8c0acbbb8de687188f86f15f3f0b188..e310d4f1400d46177ec01390b781077398124223 100644 (file)
@@ -496,15 +496,15 @@ BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices)
 BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
 {
     Graph graph;
-    armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QuantisedAsymm8);
-    armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QuantisedAsymm8);
+    armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
+    armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
     std::vector<uint8_t> anchorsVector(40);
-    armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QuantisedAsymm8), anchorsVector);
+    armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8), anchorsVector);
 
-    armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QuantisedAsymm8);
-    armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QuantisedAsymm8);
-    armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QuantisedAsymm8);
-    armnn::TensorInfo numDetectionInfo({1}, DataType::QuantisedAsymm8);
+    armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
+    armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
+    armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
+    armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
 
     Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
     input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index e147a84eb6111a608bd09f67fd711f277835a9af..900aa1813efb29b8b2ce755d70ccb3e8ca506ba7 100644 (file)
@@ -82,17 +82,17 @@ protected:
     {
         switch (m_QuantizerOptions.m_ActivationFormat)
         {
-            case DataType::QuantisedAsymm8:
+            case DataType::QAsymmU8:
                 TestQuantizationParamsImpl(
-                    info, DataType::QuantisedAsymm8, qAsymm8Params.first, qAsymm8Params.second);
+                    info, DataType::QAsymmU8, qAsymm8Params.first, qAsymm8Params.second);
                 break;
             case DataType::QSymmS8:
                 TestQuantizationParamsImpl(
                         info, DataType::QSymmS8, qSymm8Params.first, qSymm8Params.second);
                 break;
-            case DataType::QuantisedSymm16:
+            case DataType::QSymmS16:
                 TestQuantizationParamsImpl(
-                    info, DataType::QuantisedSymm16, qSymm16Params.first, qSymm16Params.second);
+                    info, DataType::QSymmS16, qSymm16Params.first, qSymm16Params.second);
                 break;
             default:
                 throw InvalidArgumentException("Unsupported quantization target");
@@ -106,27 +106,27 @@ protected:
 
     void TestConstantQuantizationParams(const TensorInfo& info,
                                         const OffsetScalePair& params,
-                                        DataType dataType = DataType::QuantisedAsymm8)
+                                        DataType dataType = DataType::QAsymmU8)
     {
         boost::ignore_unused(dataType);
-        TestQuantizationParamsImpl(info, DataType::QuantisedAsymm8, params.first, params.second);
+        TestQuantizationParamsImpl(info, DataType::QAsymmU8, params.first, params.second);
     }
 
     void TestBiasQuantizationParams(const TensorInfo& info,
                                     const OffsetScalePair& qAsymm8Params,
                                     const OffsetScalePair& qSymm8Params,
                                     const OffsetScalePair& qSymm16Params,
-                                    DataType dataType = DataType::QuantisedAsymm8)
+                                    DataType dataType = DataType::QAsymmU8)
     {
         switch (m_QuantizerOptions.m_ActivationFormat)
         {
-            case DataType::QuantisedAsymm8:
+            case DataType::QAsymmU8:
                 TestQuantizationParamsImpl(info, dataType, qAsymm8Params.first, qAsymm8Params.second);
                 break;
             case DataType::QSymmS8:
                 TestQuantizationParamsImpl(info, dataType, qSymm8Params.first, qSymm8Params.second);
                 break;
-            case DataType::QuantisedSymm16:
+            case DataType::QSymmS16:
                 TestQuantizationParamsImpl(info, dataType, qSymm16Params.first, qSymm16Params.second);
                 break;
             default:
@@ -237,7 +237,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAddition)
     TestAdditionQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestAdditionQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -399,7 +399,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbsActivation)
     TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -424,7 +424,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLinearActivation)
     TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -449,7 +449,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReLuActivation)
     TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -474,7 +474,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftReLuActivation)
     TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -524,7 +524,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
     TestBoundedReluActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestBoundedReluActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -575,7 +575,7 @@ BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
     TestTanHActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestTanHActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -636,7 +636,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLeakyReLuActivation)
     TestLeakyReLuActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestLeakyReLuActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -719,7 +719,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
     TestBatchNormalizationQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions QQsymm16Options(DataType::QuantisedSymm16);
+    const QuantizerOptions QQsymm16Options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), QQsymm16Options)->ExportNetwork();
     TestBatchNormalizationQuantization validatorQSymm16(QQsymm16Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -784,7 +784,7 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace)
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
     // test QSymm16 quantization
-    const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
     TestDepthToSpaceQuantization validatorQSymm16(Qsymm16Options, inputShape, outputShape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -944,7 +944,7 @@ void ValidateFullyConnectedLayer(const bool biasEnabled)
     TestFullyConnectedQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
     TestFullyConnectedQuantization validatorQSymm16(Qsymm16Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1025,7 +1025,7 @@ void TestQuantizeConvolution2d(bool useBiases)
     TestConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
     TestConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1106,7 +1106,7 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases)
     TestDepthwiseConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+    const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
     TestDepthwiseConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1177,7 +1177,7 @@ BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
     // test QSymm16 quantization
-    const QuantizerOptions qSymm16Options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16Options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16Options)->ExportNetwork();
     TestInstanceNormalizationQuantization validatorQSymm16(qSymm16Options, tensorShape, tensorShape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1241,7 +1241,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax)
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
     // test QuantisedSymm16 quantization
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestLogSoftmaxQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1311,7 +1311,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
     TestSoftmaxQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestSoftmaxQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1348,7 +1348,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStandIn)
                       armnn::UnimplementedException);
 
     // test QuantisedSymm16 quantization
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     BOOST_CHECK_THROW(INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(),
                       armnn::UnimplementedException);
 }
@@ -1434,7 +1434,7 @@ BOOST_AUTO_TEST_CASE(QuantizePermute)
     TestPermuteQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestPermuteQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1484,7 +1484,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
     TestSpaceToBatchQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestSpaceToBatchQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1536,7 +1536,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToDepth)
     TestSpaceToDepthQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestSpaceToDepthQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1600,7 +1600,7 @@ BOOST_AUTO_TEST_CASE(QuantizePooling2d)
     TestPooling2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestPooling2dQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1666,7 +1666,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant)
     TestConstantQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestConstantQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1721,7 +1721,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbs)
     TestAbsQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestAbsQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1804,7 +1804,7 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
     TestArgMinMaxQuantization validatorQSymm8(qSymm8Options, inputShape, outputShape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestArgMinMaxQuantization validatorQSymm16(qSymm16options, inputShape, outputShape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1868,7 +1868,7 @@ BOOST_AUTO_TEST_CASE(QuantizeComparison)
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
     // test QuantisedSymm16 quantization
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestComparisonQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -1949,7 +1949,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConcat)
     concatLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     const QuantizerOptions qSymm8Options(DataType::QSymmS8);
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkQuantizerPtr quantizerPtrQAsymm8 =  INetworkQuantizer::Create(network.get());
     INetworkQuantizerPtr quantizerPtrQSymm8  =  INetworkQuantizer::Create(network.get(), qSymm8Options);
     INetworkQuantizerPtr quantizerPtrQSymm16 =  INetworkQuantizer::Create(network.get(), qSymm16options);
@@ -2026,7 +2026,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReshape)
     TestReshapeQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestReshapeQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2075,7 +2075,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSplitter)
     TestSplitterQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestSplitterQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2129,7 +2129,7 @@ BOOST_AUTO_TEST_CASE(QuantizeResize)
     TestResizeQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestResizeQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2179,7 +2179,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStridedSlice)
     TestStridedSliceQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestStridedSliceQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2229,7 +2229,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace)
     TestBatchToSpaceQuantization validatorQSymm8(qSymm8Options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestBatchToSpaceQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2343,7 +2343,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
     TestPreluQuantization validatorQSymm8(qSymm8Options, inputShape, alphaShape, outputShape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestPreluQuantization validatorQSymm16(qSymm16options, inputShape, alphaShape, outputShape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2417,7 +2417,7 @@ void TestQuantizeTransposeConvolution2d(bool useBiases)
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
     // test QSymm16 quantization
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestTransposeConvolution2dQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2500,7 +2500,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStack)
     TestStackQuantization validatorQSymm8(qSymm8Options, inputShape, inputShape);
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestStackQuantization validatorQSymm16(qSymm16options, inputShape, outputShape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2562,7 +2562,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSlice)
     VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
 
     // test QSymm16 quantization
-    const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+    const QuantizerOptions qSymm16options(DataType::QSymmS16);
     INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
     TestSliceQuantization validatorQSymm16(qSymm16options, shape, shape);
     VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
@@ -2679,7 +2679,7 @@ void PreserveTypeTestImpl(const DataType& dataType)
     addition->GetOutputSlot(0).SetTensorInfo(info);
 
     QuantizerOptions options = dataType == DataType::Float32 ?
-            QuantizerOptions(DataType::QuantisedAsymm8, true) : QuantizerOptions(dataType, true);
+            QuantizerOptions(DataType::QAsymmU8, true) : QuantizerOptions(dataType, true);
 
     INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
     TestPreserveType validatorQAsymm8(options, dataType, shape, shape);
@@ -2695,7 +2695,7 @@ BOOST_AUTO_TEST_CASE(PreserveTypeFloat32)
 
 BOOST_AUTO_TEST_CASE(PreserveTypeQAsymm8)
 {
-    PreserveTypeTestImpl(DataType::QuantisedAsymm8);
+    PreserveTypeTestImpl(DataType::QAsymmU8);
 }
 
 BOOST_AUTO_TEST_CASE(PreserveTypeQsymm8)
@@ -2705,7 +2705,7 @@ BOOST_AUTO_TEST_CASE(PreserveTypeQsymm8)
 
 BOOST_AUTO_TEST_CASE(PreserveTypeQsymm16)
 {
-    PreserveTypeTestImpl(DataType::QuantisedSymm16);
+    PreserveTypeTestImpl(DataType::QSymmS16);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 642f3345752315c302b0542512e9275d4820da99..7263cbd78405953d5e1ff63597c101c81ad754e3 100644 (file)
@@ -261,14 +261,14 @@ BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
 
     input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
             armnn::TensorShape({ 1, 5 }),
-            armnn::DataType::QuantisedAsymm8,
+            armnn::DataType::QAsymmU8,
             1.0f/255,
             0
     ));
 
     softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
             armnn::TensorShape({ 1, 5 }),
-            armnn::DataType::QuantisedAsymm8
+            armnn::DataType::QAsymmU8
     ));
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp
index 154a0bca04c4c2903181dc782d2e6d2655e2c8ee..3696a112cbbf04603215e7641fdb28aec986f3c9 100644
@@ -125,8 +125,8 @@ BOOST_AUTO_TEST_CASE(ModifyTensorInfo)
     TensorInfo info;
     info.SetShape({ 5, 6, 7, 8 });
     BOOST_TEST((info.GetShape() == TensorShape({ 5, 6, 7, 8 })));
-    info.SetDataType(DataType::QuantisedAsymm8);
-    BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+    info.SetDataType(DataType::QAsymmU8);
+    BOOST_TEST((info.GetDataType() == DataType::QAsymmU8));
     info.SetQuantizationScale(10.0f);
     BOOST_TEST(info.GetQuantizationScale() == 10.0f);
     info.SetQuantizationOffset(5);
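
The same quantization parameters can also be supplied at construction time; a one-line equivalent of the mutator calls above, using the four-argument TensorInfo constructor that appears throughout the serializer tests below:

    // shape, data type, quantization scale, quantization offset
    armnn::TensorInfo info({ 5, 6, 7, 8 }, armnn::DataType::QAsymmU8, 10.0f, 5);
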
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index 897a35fe4cbd9514a5b4a70e139c5db7eecf83cd..4c371d6ed9338b74858bb790ed29b49238f4e977 100644
@@ -18,7 +18,7 @@ BOOST_AUTO_TEST_SUITE(Utils)
 BOOST_AUTO_TEST_CASE(DataTypeSize)
 {
     BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4);
-    BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QuantisedAsymm8) == 1);
+    BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1);
     BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4);
     BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
 }
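
The rename changes names only, not element sizes: QAsymmU8 is still a one-byte payload. The 16-bit symmetric type occupies two bytes, so the analogous check (not present in this hunk) would be:

    BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QSymmS16) == 2);
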
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 6a65c6d6d598612739b243a1fbb3a1eda06c18f9..aaf6aa9696dbf9e7098f079a123a2b6386ebc823 100644
@@ -475,10 +475,12 @@ armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
     switch (tensorPtr->dataType())
     {
         case DataType_QuantisedAsymm8:
-            type = armnn::DataType::QuantisedAsymm8;
+        case DataType_QAsymmU8:
+            type = armnn::DataType::QAsymmU8;
             break;
+        case DataType_QSymmS16:
         case DataType_QuantisedSymm16:
-            type = armnn::DataType::QuantisedSymm16;
+            type = armnn::DataType::QSymmS16;
             break;
         case DataType_Signed32:
             type = armnn::DataType::Signed32;
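
This hunk is the backward-compatibility pivot of the change: the deserializer accepts both the deprecated and the new flatbuffer enumerators and folds them onto the renamed armnn types, so models serialized before the rename keep loading. Condensed into a standalone sketch (the wire values come from the schema change further down):

    // `fbType` is the flatbuffer DataType read from the model file;
    // old files carry the values 2/5, newly written files carry 6/7.
    armnn::DataType ToArmnnType(armnnSerializer::DataType fbType)
    {
        switch (fbType)
        {
            case armnnSerializer::DataType_QuantisedAsymm8: // deprecated, wire value 2
            case armnnSerializer::DataType_QAsymmU8:        // wire value 6
                return armnn::DataType::QAsymmU8;
            case armnnSerializer::DataType_QuantisedSymm16: // deprecated, wire value 5
            case armnnSerializer::DataType_QSymmS16:        // wire value 7
                return armnn::DataType::QSymmS16;
            default:
                throw armnn::InvalidArgumentException("Unsupported data type");
        }
    }
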
diff --git a/src/armnnDeserializer/test/DeserializeActivation.cpp b/src/armnnDeserializer/test/DeserializeActivation.cpp
index 9e9889303803c9b9b3b860fbc9a34ef6b6f342c2..44765d56a00cf4effc2093e0da030ce290338f60 100644
@@ -144,7 +144,7 @@ struct SimpleActivationFixture4 : ActivationFixture
 
 BOOST_FIXTURE_TEST_CASE(ActivationReluQuantisedAsymm8, SimpleActivationFixture)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
             0,
             {{"InputLayer", {10, 0, 2, 0}}},
             {{"OutputLayer", {10, 0, 2, 0}}});
@@ -161,7 +161,7 @@ BOOST_FIXTURE_TEST_CASE(ActivationReluFloat32, SimpleActivationFixture2)
 
 BOOST_FIXTURE_TEST_CASE(ActivationBoundedReluQuantisedAsymm8, SimpleActivationFixture3)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
             0,
             {{"InputLayer", {10, 0, 2, 0}}},
             {{"OutputLayer", {5, 0, 2, 0}}});
diff --git a/src/armnnDeserializer/test/DeserializeAdd.cpp b/src/armnnDeserializer/test/DeserializeAdd.cpp
index be292bc304c9f060077e8049aecc0683946c524d..325bb6e1c33d0f8edc01aadd82310e38226f428c 100644
@@ -145,7 +145,7 @@ struct SimpleAddFixture2 : AddFixture
 
 BOOST_FIXTURE_TEST_CASE(AddQuantisedAsymm8, SimpleAddFixture)
 {
-  RunTest<2, armnn::DataType::QuantisedAsymm8>(
+  RunTest<2, armnn::DataType::QAsymmU8>(
       0,
       {{"InputLayer1", { 0, 1, 2, 3 }},
       {"InputLayer2", { 4, 5, 6, 7 }}},
diff --git a/src/armnnDeserializer/test/DeserializeComparison.cpp b/src/armnnDeserializer/test/DeserializeComparison.cpp
index 6616398b73f21f6d3333f29dbdff81966955607c..9881b9e61e8cc850b2122f3ed86b982d7a579ffc 100644
@@ -242,11 +242,21 @@ DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less,           Float32)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual,    Float32)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual,       Float32)
 
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal,          QuantisedAsymm8)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater,        QuantisedAsymm8)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QuantisedAsymm8)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less,           QuantisedAsymm8)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual,    QuantisedAsymm8)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual,       QuantisedAsymm8)
+ARMNN_NO_DEPRECATE_WARN_END
+
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal,          QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater,        QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less,           QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual,    QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual,       QAsymmU8)
 
 BOOST_AUTO_TEST_SUITE_END()
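
The deprecated enumerator names remain compilable, so the existing QuantisedAsymm8 cases are kept but fenced with the deprecation-warning suppression macros, while a warning-free QAsymmU8 copy is added alongside. ARMNN_NO_DEPRECATE_WARN_BEGIN/END are commonly implemented along these lines (a sketch of the usual pattern, not armnn's exact definition):

    // Push the diagnostic state, silence deprecation warnings for the
    // enclosed region, then restore the previous state on END.
    #define SKETCH_NO_DEPRECATE_WARN_BEGIN \
        _Pragma("GCC diagnostic push") \
        _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
    #define SKETCH_NO_DEPRECATE_WARN_END \
        _Pragma("GCC diagnostic pop")
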
diff --git a/src/armnnDeserializer/test/DeserializeConstant.cpp b/src/armnnDeserializer/test/DeserializeConstant.cpp
index 0abe5e6ca1eb399880cc361815329184adad3dbf..cb0e4ad42f36df5f4a385bef9243beeebce2c149 100644
@@ -143,7 +143,7 @@ struct SimpleConstantAddFixture : ConstantAddFixture
 
 BOOST_FIXTURE_TEST_CASE(SimpleConstantAddQuantisedAsymm8, SimpleConstantAddFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
             0,
             { 1, 2, 3, 4, 5, 6  },
             { 2, 4, 6, 8, 10, 12 });
diff --git a/src/armnnDeserializer/test/DeserializeDivision.cpp b/src/armnnDeserializer/test/DeserializeDivision.cpp
index dc6f5820cfd962f5b6f840dae5d55065d4c7e5fb..d12b043ce05c13a01e265f4c09c212e8feb5670b 100644
@@ -141,7 +141,7 @@ struct SimpleDivisionFixture2 : DivisionFixture
 
 BOOST_FIXTURE_TEST_CASE(DivisionQuantisedAsymm8, SimpleDivisionFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
         0,
         {{"InputLayer1", { 0, 5, 24, 21 }},
          {"InputLayer2", { 4, 1, 6,  7 }}},
diff --git a/src/armnnDeserializer/test/DeserializeFullyConnected.cpp b/src/armnnDeserializer/test/DeserializeFullyConnected.cpp
index 77d0acc782f18f8a6ed71d96d8a9f1c6d614ef2e..90698cb993f1dba8123dda5610765236b363454c 100644
@@ -131,7 +131,7 @@ struct FullyConnectedWithNoBiasFixture : FullyConnectedFixture
 
 BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
          0,
          {{"InputLayer",  { 10, 20, 30, 40 }}},
          {{"OutputLayer", { 400/2 }}});
diff --git a/src/armnnDeserializer/test/DeserializeMultiplication.cpp b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
index f784ba6f3174943b31f0efedf395d30fe8526943..c0bb13e17b27d611507dc7beb7a36de55f19eab7 100644
@@ -145,7 +145,7 @@ struct SimpleMultiplicationFixture2 : MultiplicationFixture
 
 BOOST_FIXTURE_TEST_CASE(MultiplicationQuantisedAsymm8, SimpleMultiplicationFixture)
 {
-  RunTest<2, armnn::DataType::QuantisedAsymm8>(
+  RunTest<2, armnn::DataType::QAsymmU8>(
       0,
       {{"InputLayer1", { 0, 1, 2, 3 }},
       {"InputLayer2", { 4, 5, 6, 7 }}},
diff --git a/src/armnnDeserializer/test/DeserializePad.cpp b/src/armnnDeserializer/test/DeserializePad.cpp
index b18710a38162c02a56d6f2e836967fb532345f96..d9087d963e8b4298a326369541452f92dc115349 100644
@@ -112,7 +112,7 @@ struct SimplePadFixture : PadFixture
 
 BOOST_FIXTURE_TEST_CASE(SimplePadQuantisedAsymm8, SimplePadFixture)
 {
-    RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<3, armnn::DataType::QAsymmU8>(0,
                                                  {
                                                     0, 4, 2, 5, 6, 1, 5, 2
                                                  },
diff --git a/src/armnnDeserializer/test/DeserializePermute.cpp b/src/armnnDeserializer/test/DeserializePermute.cpp
index 6d08b5fee9cf58b38b1b88290288d304873951b3..be50a673283486448544adade17d3fefd63a1029 100644
@@ -112,7 +112,7 @@ struct SimplePermute2DFixture : PermuteFixture
 
 BOOST_FIXTURE_TEST_CASE(SimplePermute2DQuantisedAsymm8, SimplePermute2DFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<2, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 2, 3, 4, 5, 6 },
                                                  { 1, 4, 2, 5, 3, 6 });
 }
@@ -127,7 +127,7 @@ struct SimplePermute4DFixture : PermuteFixture
 
 BOOST_FIXTURE_TEST_CASE(SimplePermute4DQuantisedAsymm8, SimplePermute4DFixture)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<4, armnn::DataType::QAsymmU8>(0,
                                                  {  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
                                                    13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
                                                  {  1, 13,  5, 17,  9, 21,  2, 14,  6, 18, 10, 22,
diff --git a/src/armnnDeserializer/test/DeserializePooling2d.cpp b/src/armnnDeserializer/test/DeserializePooling2d.cpp
index 55fb655b34b89b13568fbf6ee70624a73acdd7ce..b63aeb5fdc776c01237eedc3d43d0ddb7177e0c3 100644
@@ -141,7 +141,7 @@ BOOST_FIXTURE_TEST_CASE(Pooling2dFloat32Avg, SimpleAvgPooling2dFixture)
 
 BOOST_FIXTURE_TEST_CASE(Pooling2dQuantisedAsymm8Avg, SimpleAvgPooling2dFixture2)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<4, armnn::DataType::QAsymmU8>(0,
                                                 { 20, 40, 60, 80 },
                                                 { 50 });
 }
@@ -153,7 +153,7 @@ BOOST_FIXTURE_TEST_CASE(Pooling2dFloat32Max, SimpleMaxPooling2dFixture)
 
 BOOST_FIXTURE_TEST_CASE(Pooling2dQuantisedAsymm8Max, SimpleMaxPooling2dFixture2)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<4, armnn::DataType::QAsymmU8>(0,
                                                 { 20, 40, 60, 80 },
                                                 { 80 });
 }
diff --git a/src/armnnDeserializer/test/DeserializeReshape.cpp b/src/armnnDeserializer/test/DeserializeReshape.cpp
index 301d8986c0859ff07e2e1c6e4616d664e513582d..554b867db759b42433c019b7b533aeab9e0877a7 100644
@@ -112,7 +112,7 @@ struct SimpleReshapeFixture2 : ReshapeFixture
 
 BOOST_FIXTURE_TEST_CASE(ReshapeQuantisedAsymm8, SimpleReshapeFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<2, armnn::DataType::QAsymmU8>(0,
                                                 { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
                                                 { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
 }
diff --git a/src/armnnDeserializer/test/DeserializeSubtraction.cpp b/src/armnnDeserializer/test/DeserializeSubtraction.cpp
index 5058bb840d312f18c3efae93ec0f5ff4d686b308..a4bd0fbeb4c046b632de86fa09eb200a1140aaaa 100644
@@ -148,7 +148,7 @@ struct SimpleSubtractionFixtureBroadcast : SubtractionFixture
 
 BOOST_FIXTURE_TEST_CASE(SubtractionQuantisedAsymm8, SimpleSubtractionFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
         0,
         {{"inputLayer1", { 4, 5, 6, 7 }},
          {"inputLayer2", { 3, 2, 1, 0 }}},
diff --git a/src/armnnQuantizer/ArmNNQuantizerMain.cpp b/src/armnnQuantizer/ArmNNQuantizerMain.cpp
index 227a105bbacbb939974804578649f80d0612de4f..30167e73f2c44f4b8e91c385d1b21b4e49d9d22b 100644
@@ -37,8 +37,8 @@ int main(int argc, char* argv[])
 
     armnn::QuantizerOptions quantizerOptions;
     quantizerOptions.m_ActivationFormat = cmdline.GetQuantizationScheme() == "QSymm16"
-                                          ? armnn::DataType::QuantisedSymm16
-                                          : armnn::DataType::QuantisedAsymm8;
+                                          ? armnn::DataType::QSymmS16
+                                          : armnn::DataType::QAsymmU8;
 
     quantizerOptions.m_PreserveType = cmdline.HasPreservedDataType();
 
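The command-line tool maps every scheme string other than "QSymm16" onto QAsymmU8, so QAsymm8 stays the implicit default. Pulled out into a hypothetical helper (name and structure are illustrative, not part of the tool):

    #include <string>
    #include <armnn/Types.hpp>

    armnn::DataType SchemeToDataType(const std::string& scheme)
    {
        return scheme == "QSymm16" ? armnn::DataType::QSymmS16
                                   : armnn::DataType::QAsymmU8; // default
    }
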
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index bad95cfc560282184c27f051af7e7ed101be4eaf..0d30d96452b545b5466493e8ceb436f575c87762 100644
@@ -30,10 +30,12 @@ enum ArgMinMaxFunction : byte {
 enum DataType : byte {
     Float16 = 0,
     Float32 = 1,
-    QuantisedAsymm8 = 2,
+    QuantisedAsymm8 = 2, // deprecated
     Signed32 = 3,
     Boolean = 4,
-    QuantisedSymm16 = 5
+    QuantisedSymm16 = 5, // deprecated
+    QAsymmU8 = 6,
+    QSymmS16 = 7
 }
 
 enum DataLayout : byte {
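
Note the schema treatment: flatbuffer enumerator values are wire format, so the deprecated names keep their numeric slots (2 and 5) and the new names are appended with fresh values (6 and 7) rather than renamed in place. In the generated C++ this invariant could be pinned down with compile-time guards (a sketch; the enumerators live in the armnnSerializer namespace):

    // If any of these assertions fires, previously serialized models
    // would be read back with the wrong data type.
    static_assert(armnnSerializer::DataType_QuantisedAsymm8 == 2, "wire value must not move");
    static_assert(armnnSerializer::DataType_QAsymmU8        == 6, "new name takes a fresh value");
    static_assert(armnnSerializer::DataType_QuantisedSymm16 == 5, "wire value must not move");
    static_assert(armnnSerializer::DataType_QSymmS16        == 7, "new name takes a fresh value");
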
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 608a9c3480823f71350cf5bfbc4055c9f328872b..be6fa64b0a4ab2dcab5d36bfc5a8fa597dcab737 100644
@@ -1405,7 +1405,7 @@ flatbuffers::Offset<serializer::ConstTensor>
             fbPayload = flatBuffersData.o;
             break;
         }
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
         {
             auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
             flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
@@ -1414,7 +1414,7 @@ flatbuffers::Offset<serializer::ConstTensor>
             fbPayload = flatBuffersData.o;
             break;
         }
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         case armnn::DataType::Boolean:
         default:
         {
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 908da6450ca89b6d732f0569076a89c1a615f69c..df1ef285de042bf74abe0cc7817b3504ebf48631 100644
@@ -36,9 +36,9 @@ armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType da
         case armnn::DataType::Signed32:
             return armnnSerializer::ConstTensorData::ConstTensorData_IntData;
         case armnn::DataType::Float16:
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
             return armnnSerializer::ConstTensorData::ConstTensorData_ShortData;
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         case armnn::DataType::Boolean:
             return armnnSerializer::ConstTensorData::ConstTensorData_ByteData;
         default:
@@ -56,10 +56,10 @@ armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
             return armnnSerializer::DataType::DataType_Float16;
         case armnn::DataType::Signed32:
             return armnnSerializer::DataType::DataType_Signed32;
-        case armnn::DataType::QuantisedSymm16:
-            return armnnSerializer::DataType::DataType_QuantisedSymm16;
-        case armnn::DataType::QuantisedAsymm8:
-            return armnnSerializer::DataType::DataType_QuantisedAsymm8;
+        case armnn::DataType::QSymmS16:
+            return armnnSerializer::DataType::DataType_QSymmS16;
+        case armnn::DataType::QAsymmU8:
+            return armnnSerializer::DataType::DataType_QAsymmU8;
         case armnn::DataType::Boolean:
             return armnnSerializer::DataType::DataType_Boolean;
         default:
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 8dfca3c52e497591aa5d49e2f05a4bdf9da7a692..3e67cf062f2eb1796e6310c9d8fbdb325ab1b5f4 100644
@@ -206,7 +206,7 @@ void CompareConstTensor(const armnn::ConstTensor& tensor1, const armnn::ConstTen
             CompareConstTensorData<const float*>(
                 tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
             break;
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         case armnn::DataType::Boolean:
             CompareConstTensorData<const uint8_t*>(
                 tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
@@ -770,7 +770,7 @@ BOOST_AUTO_TEST_CASE(SerializeDequantize)
     DECLARE_LAYER_VERIFIER_CLASS(Dequantize)
 
     const std::string layerName("dequantize");
-    const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QuantisedAsymm8, 0.5f, 1);
+    const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QAsymmU8, 0.5f, 1);
     const armnn::TensorInfo outputInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32);
 
     armnn::INetworkPtr network = armnn::INetwork::Create();
@@ -1152,8 +1152,8 @@ BOOST_AUTO_TEST_CASE(SerializeGather)
     };
 
     const std::string layerName("gather");
-    armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QAsymmU8);
+    armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QAsymmU8);
     const armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32);
 
     paramsInfo.SetQuantizationScale(1.0f);
@@ -3994,7 +3994,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
     armnn::TensorShape inputToInputWeightsShape = {4, 2};
     std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
     armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
-                                              armnn::DataType::QuantisedAsymm8,
+                                              armnn::DataType::QAsymmU8,
                                               weightsScale,
                                               weightsOffset);
     armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
@@ -4002,7 +4002,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
     armnn::TensorShape inputToForgetWeightsShape = {4, 2};
     std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
     armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
-                                               armnn::DataType::QuantisedAsymm8,
+                                               armnn::DataType::QAsymmU8,
                                                weightsScale,
                                                weightsOffset);
     armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
@@ -4010,7 +4010,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
     armnn::TensorShape inputToCellWeightsShape = {4, 2};
     std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
     armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
-                                             armnn::DataType::QuantisedAsymm8,
+                                             armnn::DataType::QAsymmU8,
                                              weightsScale,
                                              weightsOffset);
     armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
@@ -4018,7 +4018,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
     armnn::TensorShape inputToOutputWeightsShape = {4, 2};
     std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
     armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
-                                               armnn::DataType::QuantisedAsymm8,
+                                               armnn::DataType::QAsymmU8,
                                                weightsScale,
                                                weightsOffset);
     armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
@@ -4027,7 +4027,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
     armnn::TensorShape recurrentToInputWeightsShape = {4, 4};
     std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
     armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
-                                                  armnn::DataType::QuantisedAsymm8,
+                                                  armnn::DataType::QAsymmU8,
                                                   weightsScale,
                                                   weightsOffset);
     armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
@@ -4035,7 +4035,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
     armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
     std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
     armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
-                                                   armnn::DataType::QuantisedAsymm8,
+                                                   armnn::DataType::QAsymmU8,
                                                    weightsScale,
                                                    weightsOffset);
     armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
@@ -4043,7 +4043,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
     armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
     std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
     armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
-                                                 armnn::DataType::QuantisedAsymm8,
+                                                 armnn::DataType::QAsymmU8,
                                                  weightsScale,
                                                  weightsOffset);
     armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
@@ -4051,7 +4051,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
     armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
     std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
     armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
-                                                   armnn::DataType::QuantisedAsymm8,
+                                                   armnn::DataType::QAsymmU8,
                                                    weightsScale,
                                                    weightsOffset);
     armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
@@ -4114,15 +4114,15 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
 
     // Connect up
     armnn::TensorInfo inputTensorInfo({ batchSize, inputSize },
-                                      armnn::DataType::QuantisedAsymm8,
+                                      armnn::DataType::QAsymmU8,
                                       inputOutputScale,
                                       inputOutputOffset);
     armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits },
-                                          armnn::DataType::QuantisedSymm16,
+                                          armnn::DataType::QSymmS16,
                                           cellStateScale,
                                           cellStateOffset);
     armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize },
-                                            armnn::DataType::QuantisedAsymm8,
+                                            armnn::DataType::QAsymmU8,
                                             inputOutputScale,
                                             inputOutputOffset);
 
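QuantizedLstm is the one layer that uses both renamed types side by side: weights and input/output state tensors are QAsymmU8, while the cell state needs the extra precision of QSymmS16. The pairing in miniature (shapes and quantization parameters here are illustrative placeholders, not the test's values):

    armnn::TensorInfo weightsInfo({ 4, 2 }, armnn::DataType::QAsymmU8, 0.01f,  128);
    armnn::TensorInfo cellInfo   ({ 1, 4 }, armnn::DataType::QSymmS16, 0.001f, 0);
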
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 9c7dda8aecad8e53c3275e34cb46245bc8bc3045..22d65645a383804eec8965e8da003fd197b3f2c1 100644
@@ -309,7 +309,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
     switch (tensorPtr->type)
     {
         case tflite::TensorType_UINT8:
-            type = armnn::DataType::QuantisedAsymm8;
+            type = armnn::DataType::QAsymmU8;
             break;
         case tflite::TensorType_FLOAT32:
             type = armnn::DataType::Float32;
@@ -318,7 +318,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
             type = armnn::DataType::QSymmS8;
             break;
         case tflite::TensorType_INT16:
-            type = armnn::DataType::QuantisedSymm16;
+            type = armnn::DataType::QSymmS16;
             break;
         case tflite::TensorType_INT32:
             type = armnn::DataType::Signed32;
@@ -2818,7 +2818,7 @@ TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
                                                         tensorPtr,
                                                         tensorInfo,
                                                         permutationVector);
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
             return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
                                                           tensorPtr,
                                                           tensorInfo,
diff --git a/src/armnnTfLiteParser/test/Addition.cpp b/src/armnnTfLiteParser/test/Addition.cpp
index 94389d31344eeb31edd3b29a534b502159eed9d0..deeb707a2f0331edb00a2543b0cd42a147c9e89c 100644
@@ -97,7 +97,7 @@ struct SimpleAddFixture : AddFixture
 
 BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture)
 {
-  RunTest<2, armnn::DataType::QuantisedAsymm8>(
+  RunTest<2, armnn::DataType::QAsymmU8>(
       0,
       {{"inputTensor1", { 0, 1, 2, 3 }},
       {"inputTensor2", { 4, 5, 6, 7 }}},
diff --git a/src/armnnTfLiteParser/test/AvgPool2D.cpp b/src/armnnTfLiteParser/test/AvgPool2D.cpp
index a39c088d4419665a072f0bd5bb0b3654937ecb8c..a56e7e7362b308e487e1a89f00284f4f69218513 100644
@@ -98,7 +98,7 @@ struct AvgPoolLiteFixture2DOutput : AvgPool2DFixture
 
 BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 });
+    RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 });
 }
 
 BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
@@ -108,13 +108,13 @@ BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutpu
 
 BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
 }
 
 BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
 {
-    BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
+    BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Concatenation.cpp b/src/armnnTfLiteParser/test/Concatenation.cpp
index d3d571f1744aa6f85ff952011b7be6e1b19e4460..8e31a3edb023d77ce2fca8cc82c409b189162bc3 100644
@@ -100,7 +100,7 @@ struct ConcatenationFixtureNegativeDim : ConcatenationFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseConcatenationNegativeDim, ConcatenationFixtureNegativeDim)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         {{"inputTensor1", { 0, 1, 2, 3 }},
         {"inputTensor2", { 4, 5, 6, 7 }}},
@@ -114,7 +114,7 @@ struct ConcatenationFixtureNCHW : ConcatenationFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseConcatenationNCHW, ConcatenationFixtureNCHW)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         {{"inputTensor1", { 0, 1, 2, 3 }},
         {"inputTensor2", { 4, 5, 6, 7 }}},
@@ -128,7 +128,7 @@ struct ConcatenationFixtureNHWC : ConcatenationFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseConcatenationNHWC, ConcatenationFixtureNHWC)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         {{"inputTensor1", { 0, 1, 2, 3 }},
         {"inputTensor2", { 4, 5, 6, 7 }}},
@@ -142,7 +142,7 @@ struct ConcatenationFixtureDim1 : ConcatenationFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim1, ConcatenationFixtureDim1)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         { { "inputTensor1", {  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11,
                                12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
@@ -161,7 +161,7 @@ struct ConcatenationFixtureDim3 : ConcatenationFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim3, ConcatenationFixtureDim3)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         { { "inputTensor1", {  0,  1,  2,  3,
                                4,  5,  6,  7,
@@ -196,7 +196,7 @@ struct ConcatenationFixture3DDim0 : ConcatenationFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim0, ConcatenationFixture3DDim0)
 {
-    RunTest<3, armnn::DataType::QuantisedAsymm8>(
+    RunTest<3, armnn::DataType::QAsymmU8>(
         0,
         { { "inputTensor1", { 0,  1,  2,  3,  4,  5 } },
           { "inputTensor2", { 6,  7,  8,  9, 10, 11,
@@ -213,7 +213,7 @@ struct ConcatenationFixture3DDim1 : ConcatenationFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim1, ConcatenationFixture3DDim1)
 {
-    RunTest<3, armnn::DataType::QuantisedAsymm8>(
+    RunTest<3, armnn::DataType::QAsymmU8>(
         0,
         { { "inputTensor1", { 0,  1,  2,  3,  4,  5 } },
           { "inputTensor2", { 6,  7,  8,  9, 10, 11,
@@ -230,7 +230,7 @@ struct ConcatenationFixture3DDim2 : ConcatenationFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim2, ConcatenationFixture3DDim2)
 {
-    RunTest<3, armnn::DataType::QuantisedAsymm8>(
+    RunTest<3, armnn::DataType::QAsymmU8>(
         0,
         { { "inputTensor1", { 0,  1,  2,
                               3,  4,  5 } },
diff --git a/src/armnnTfLiteParser/test/Constant.cpp b/src/armnnTfLiteParser/test/Constant.cpp
index 356e1b769d0f2513b9ece07a9734c598ea2312f1..cc89223469c6c8c9fa63c6953d07184502f55fb8 100644
@@ -103,7 +103,7 @@ struct SimpleConstantAddFixture : ConstantAddFixture
 
 BOOST_FIXTURE_TEST_CASE(SimpleConstantAdd, SimpleConstantAddFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
                 0,
                 {{"InputTensor", { 0, 1, 2, 3 }}},
                 {{"OutputTensor", { 4, 6, 8, 10 }}}
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
index 38c6675ddb3940d0a469ff9c944e79aeeb0947e6..2eae5f5a1af4eeef26b9bd1d8bf3fe9a9af8b8c9 100644
@@ -89,7 +89,7 @@ struct SimpleConv2DFixture : public ParserFlatbuffersFixture
 
 BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         {
             1, 2, 3,
@@ -219,7 +219,7 @@ struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
 
 BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         {
             1, 2,
@@ -290,7 +290,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture
     uint8_t outZero = 20;
     uint8_t fz = 4; // filter zero point
 
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         {
             1, 2,
@@ -331,7 +331,7 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixtu
 {
     uint8_t relu6Min = 6 / 2; // divide by output scale
 
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         {
             1, 2,
diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
index c0767801b38a07af0e159a4ca7e2b7b179f18dc5..2bf08fa79ff4c7c97fc2e4858f2e6192375925f4 100644
@@ -133,7 +133,7 @@ struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         { 0, 1, 2,
           3, 4, 5,
@@ -160,7 +160,7 @@ struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         { 0, 1, 2,
           3, 4, 5,
@@ -185,7 +185,7 @@ struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         { 0, 1, 2,
           3, 4, 5,
diff --git a/src/armnnTfLiteParser/test/Dequantize.cpp b/src/armnnTfLiteParser/test/Dequantize.cpp
index 2f98c07a662c4d2b869c6e71bc158fd44838e48a..79dfe2e26a498b41ef0c4d270b91bf2d171f7eda 100644
@@ -82,7 +82,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
     BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymm8, SimpleDequantizeFixtureQAsymm8)
     {
-        RunTest<2, armnn::DataType::QuantisedAsymm8 , armnn::DataType::Float32>(
+        RunTest<2, armnn::DataType::QAsymmU8 , armnn::DataType::Float32>(
                 0,
                 {{"inputTensor",  { 0u,   1u,   5u,   100u,   200u,   255u }}},
                 {{"outputTensor", { 0.0f, 1.5f, 7.5f, 150.0f, 300.0f, 382.5f }}});
@@ -97,7 +97,7 @@ BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
     BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQsymm16, SimpleDequantizeFixtureQSymm16)
     {
-        RunTest<2, armnn::DataType::QuantisedSymm16 , armnn::DataType::Float32>(
+        RunTest<2, armnn::DataType::QSymmS16 , armnn::DataType::Float32>(
                 0,
                 {{"inputTensor",  { 0,    1,    5,    32767,    -1,   -32768 }}},
                 {{"outputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f,-49152.0f }}});
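
Both fixtures use a scale of 1.5, and the expected outputs follow the affine dequantization rule real = scale * (q - offset), with offset 0 here (and always 0 for the symmetric QSymmS16). Worked through in code; the chosen values are exactly representable, so the equality checks hold:

    #include <cassert>

    float Dequantize(int q, float scale, int offset)
    {
        return scale * static_cast<float>(q - offset);
    }

    int main()
    {
        assert(Dequantize(255,    1.5f, 0) == 382.5f);    // QAsymmU8 max
        assert(Dequantize(32767,  1.5f, 0) == 49150.5f);  // QSymmS16 max
        assert(Dequantize(-32768, 1.5f, 0) == -49152.0f); // QSymmS16 min
        return 0;
    }
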
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index 1ec87f97d55e9bf6704438aa9eda3b7a9bec3b2d..f12b2b94d617c08a5bd7d66a4e266212cab60d1a 100644
@@ -220,7 +220,7 @@ BOOST_FIXTURE_TEST_CASE( ParseDetectionPostProcess, ParseDetectionPostProcessCus
         { "num_detections", numDetections}
     };
 
-    RunTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float32>(0, input, output);
+    RunTest<armnn::DataType::QAsymmU8, armnn::DataType::Float32>(0, input, output);
 }
 
 BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPostProcessCustomOptions)
@@ -288,8 +288,8 @@ BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPo
     BOOST_TEST(CheckNumberOfOutputSlot(numDetectionsLayer, 0));
 
     // Check the connections
-    armnn::TensorInfo boxEncodingTensor(armnn::TensorShape({ 1, 6, 4 }), armnn::DataType::QuantisedAsymm8, 1, 1);
-    armnn::TensorInfo scoresTensor(armnn::TensorShape({ 1, 6, 3 }), armnn::DataType::QuantisedAsymm8,
+    armnn::TensorInfo boxEncodingTensor(armnn::TensorShape({ 1, 6, 4 }), armnn::DataType::QAsymmU8, 1, 1);
+    armnn::TensorInfo scoresTensor(armnn::TensorShape({ 1, 6, 3 }), armnn::DataType::QAsymmU8,
                                                       0.00999999978f, 0);
 
     armnn::TensorInfo detectionBoxesTensor(armnn::TensorShape({ 1, 3, 4 }), armnn::DataType::Float32, 0, 0);
diff --git a/src/armnnTfLiteParser/test/FullyConnected.cpp b/src/armnnTfLiteParser/test/FullyConnected.cpp
index 54d7bcb1dca51e4937a4ae041f4d4d31042ada36..d1223d5af2ff272f1aa4c852a007c4d3ef75aadc 100644
@@ -125,7 +125,7 @@ struct FullyConnectedWithNoBiasFixture : FullyConnectedFixture
 
 BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
         0,
         { 10, 20, 30, 40 },
         { 400/2 });
@@ -145,7 +145,7 @@ struct FullyConnectedWithBiasFixture : FullyConnectedFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedWithBias, FullyConnectedWithBiasFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
         0,
         { 10, 20, 30, 40 },
         { (400+10)/2 });
@@ -165,7 +165,7 @@ struct FullyConnectedWithBiasMultipleOutputsFixture : FullyConnectedFixture
 
 BOOST_FIXTURE_TEST_CASE(FullyConnectedWithBiasMultipleOutputs, FullyConnectedWithBiasMultipleOutputsFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
             0,
             { 1, 2, 3, 4, 10, 20, 30, 40 },
             { (40+10)/2, (400+10)/2 });
diff --git a/src/armnnTfLiteParser/test/MaxPool2D.cpp b/src/armnnTfLiteParser/test/MaxPool2D.cpp
index 759fc37ccd6a1324cec45fcf86339f4df701232b..8cbef97e2fe72c8378a60fadedc4ed6f4617134b 100644
@@ -98,7 +98,7 @@ struct MaxPoolLiteFixtureUint2DOutput : MaxPool2DFixture
 
 BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 });
+    RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 });
 }
 
 BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput)
@@ -108,13 +108,13 @@ BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutpu
 
 BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
 }
 
 BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput)
 {
-    BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+    BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Reshape.cpp b/src/armnnTfLiteParser/test/Reshape.cpp
index 62fbad6953faed9e39d03d718024cb88dbe02c29..6ed568ceff098505c09befc08cba4bed6670085b 100644
@@ -86,7 +86,7 @@ struct ReshapeFixtureWithReshapeDims : ReshapeFixture
 BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDims, ReshapeFixtureWithReshapeDims)
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<2, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
                                                  { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
@@ -101,7 +101,7 @@ struct ReshapeFixtureWithReshapeDimsFlatten : ReshapeFixture
 BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlatten, ReshapeFixtureWithReshapeDimsFlatten)
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
-    RunTest<1, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<1, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
                                                  { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
@@ -116,7 +116,7 @@ struct ReshapeFixtureWithReshapeDimsFlattenTwoDims : ReshapeFixture
 BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenTwoDims, ReshapeFixtureWithReshapeDimsFlattenTwoDims)
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<2, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
                                                  { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
@@ -131,7 +131,7 @@ struct ReshapeFixtureWithReshapeDimsFlattenOneDim : ReshapeFixture
 BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenOneDim, ReshapeFixtureWithReshapeDimsFlattenOneDim)
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
-    RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+    RunTest<3, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
                                                  { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
diff --git a/src/armnnTfLiteParser/test/Softmax.cpp b/src/armnnTfLiteParser/test/Softmax.cpp
index dacd9463524fd5781e29fcdd05a4accc983288cd..c4d19f2ac01393a598c8bd7e0f6b698cd339485b 100644
@@ -71,7 +71,7 @@ struct SoftmaxFixture : public ParserFlatbuffersFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
+    RunTest<2, armnn::DataType::QAsymmU8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Split.cpp b/src/armnnTfLiteParser/test/Split.cpp
index 977bd7b6a4562262cf2469e38948bc3f6188365f..5f23799fd690f133645e23f593b1bf6e3b0ef23d 100644
@@ -179,7 +179,7 @@ struct SimpleSplitFixtureUint8 : SplitFixture
 BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitTwoUint8, SimpleSplitFixtureUint8)
 {
 
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         { {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8,
                             9, 10, 11, 12, 13, 14, 15, 16 } } },
@@ -196,7 +196,7 @@ struct SimpleSplitAxisThreeFixtureUint8 : SplitFixture
 
 BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitTwoUint8, SimpleSplitAxisThreeFixtureUint8)
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         { {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8,
                             9, 10, 11, 12, 13, 14, 15, 16 } } },
@@ -213,7 +213,7 @@ struct SimpleSplit2DFixtureUint8 : SplitFixture
 
 BOOST_FIXTURE_TEST_CASE(SimpleSplit2DUint8, SimpleSplit2DFixtureUint8)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
             0,
             { {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8 } } },
             { {"outputTensor1", { 1, 2, 3, 4 } },
@@ -229,7 +229,7 @@ struct SimpleSplit3DFixtureUint8 : SplitFixture
 
 BOOST_FIXTURE_TEST_CASE(SimpleSplit3DUint8, SimpleSplit3DFixtureUint8)
 {
-    RunTest<3, armnn::DataType::QuantisedAsymm8>(
+    RunTest<3, armnn::DataType::QAsymmU8>(
         0,
         { {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8,
                             9, 10, 11, 12, 13, 14, 15, 16 } } },
diff --git a/src/armnnTfLiteParser/test/Squeeze.cpp b/src/armnnTfLiteParser/test/Squeeze.cpp
index 13261facf1b01ea54b1cbf49c90470283a86c7c5..86a1966dd12a89bb2564e4a4e4a16c8e2b633dfb 100644
@@ -85,7 +85,7 @@ struct SqueezeFixtureWithSqueezeDims : SqueezeFixture
 BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
-    RunTest<3, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+    RunTest<3, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
         == armnn::TensorShape({2,2,1})));
 
@@ -99,7 +99,7 @@ struct SqueezeFixtureWithoutSqueezeDims : SqueezeFixture
 BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+    RunTest<2, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
         == armnn::TensorShape({2,2})));
 }
diff --git a/src/armnnTfLiteParser/test/Sub.cpp b/src/armnnTfLiteParser/test/Sub.cpp
index 0a3f58b519d59ac30a535a7ac6f0a93e70a05d36..6a251a5f74bf1cceaffe381f1d6162c398c70a83 100644
@@ -97,7 +97,7 @@ struct SimpleSubFixture : SubFixture
 
 BOOST_FIXTURE_TEST_CASE(SimpleSub, SimpleSubFixture)
 {
-  RunTest<2, armnn::DataType::QuantisedAsymm8>(
+  RunTest<2, armnn::DataType::QAsymmU8>(
       0,
       {{"inputTensor1", { 4, 5, 6, 7 }},
       {"inputTensor2", { 3, 2, 1, 0 }}},
diff --git a/src/armnnTfLiteParser/test/TransposeConv.cpp b/src/armnnTfLiteParser/test/TransposeConv.cpp
index 46b02ac9569307d7525bb7e72fda966f54e1ed55..084a286dbd278ca5a10c984aa585fb4971f143ac 100644
@@ -118,7 +118,7 @@ struct SimpleTransposeConvFixture : TransposeConvFixture
 
 BOOST_FIXTURE_TEST_CASE( ParseSimpleTransposeConv, SimpleTransposeConvFixture )
 {
-    RunTest<4, armnn::DataType::QuantisedAsymm8>(
+    RunTest<4, armnn::DataType::QAsymmU8>(
         0,
         {
             1, 2,
diff --git a/src/armnnTfLiteParser/test/Unpack.cpp b/src/armnnTfLiteParser/test/Unpack.cpp
index 04fd50dc39150218d1e4053a733a89015c837c6a..4fcd74f58593cf8d9a956d06e66942b5bb0a4fdf 100644
@@ -126,7 +126,7 @@ BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxi
 
 BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecifiedUint8, DefaultUnpackAxisZeroUint8Fixture)
 {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
         0,
         { {"inputTensor", { 1, 2, 3, 4, 5, 6,
                             7, 8, 9, 10, 11, 12,
@@ -165,7 +165,7 @@ BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSix, DefaultUnpackLastAxisFixture)
 }
 
 BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSixUint8, DefaultUnpackLastAxisUint8Fixture) {
-    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+    RunTest<2, armnn::DataType::QAsymmU8>(
         0,
         {{"inputTensor", { 1, 2, 3, 4, 5, 6,
                            7, 8, 9, 10, 11, 12,
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index 9a9cdc5156869f9e19faea4cbea2870c74e8353d..39dee679fd21f7409d671685ed3853ffe1189c59 100644
@@ -194,7 +194,7 @@ private:
         switch (dataType)
         {
             case DataType::Float32:         return "FLOAT32";
-            case DataType::QuantisedAsymm8: return "UINT8";
+            case DataType::QAsymmU8: return "UINT8";
             default:                        return "UNKNOWN";
         }
     }
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 9250b61ec9009497c900b85deae38f7da6048ee5..1cad92f58ab6af3d03a8296ad2dc48e68ab0e791 100644
@@ -23,9 +23,9 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType)
             return arm_compute::DataType::F16;
         case armnn::DataType::Float32:
             return arm_compute::DataType::F32;
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
             return arm_compute::DataType::QASYMM8;
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
             return arm_compute::DataType::QSYMM16;
         case armnn::DataType::QSymmS8:
             return arm_compute::DataType::QSYMM8;
diff --git a/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp b/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
index 1e2f0db600f757ef1fde378bfecd6f6d8c8671bc..4ab748806c713b0510a33c07b64ce243bd93aa98 100644
@@ -15,7 +15,7 @@ BOOST_AUTO_TEST_CASE(BuildArmComputeTensorInfoTest)
 {
 
     const armnn::TensorShape tensorShape = { 1, 2, 3, 4 };
-    const armnn::DataType dataType = armnn::DataType::QuantisedAsymm8;
+    const armnn::DataType dataType = armnn::DataType::QAsymmU8;
 
     const std::vector<float> quantScales = { 1.5f, 2.5f, 3.5f, 4.5f };
     const float quantScale = quantScales[0];
diff --git a/src/backends/backendsCommon/LayerSupportRules.hpp b/src/backends/backendsCommon/LayerSupportRules.hpp
index 08189f9999852fe9e00a6ed18c88a7d7e6f9339f..d8b6af8a3063c318ed1926fa00f85977fc551dff 100644
@@ -23,9 +23,9 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
         case armnn::DataType::Float16:
         case armnn::DataType::Float32:
             return weightsType;
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
             return armnn::DataType::Signed32;
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
             return armnn::DataType::Signed32;
         default:
             BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
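
Both quantized weight types map to a Signed32 bias because the bias is added to the 32-bit accumulator before requantization; by convention its scale is fixed at input_scale * weights_scale with a zero offset. A sketch of building a matching bias TensorInfo (the helper name is hypothetical):

    armnn::TensorInfo MakeBiasInfo(const armnn::TensorInfo& input,
                                   const armnn::TensorInfo& weights,
                                   const armnn::TensorShape& biasShape)
    {
        const float biasScale = input.GetQuantizationScale() * weights.GetQuantizationScale();
        return armnn::TensorInfo(biasShape, armnn::DataType::Signed32, biasScale, /*offset*/ 0);
    }
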
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 9d8174ce7dea4bdc618710ece8c0d06e75ac46f2..75db73c32f21726010ae42dcf4edfd77b39fb0ce 100644
@@ -56,13 +56,13 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
             return MakeWorkloadForType<Float16Workload>::Func(descriptor, info, std::forward<Args>(args)...);
         case DataType::Float32:
             return MakeWorkloadForType<Float32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
-        case DataType::QuantisedAsymm8:
+        case DataType::QAsymmU8:
             return MakeWorkloadForType<Uint8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
         case DataType::Signed32:
             return MakeWorkloadForType<Int32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
         case DataType::Boolean:
             return MakeWorkloadForType<BooleanWorkload>::Func(descriptor, info, std::forward<Args>(args)...);
-        case DataType::QuantisedSymm16:
+        case DataType::QSymmS16:
             return nullptr;
         default:
             BOOST_ASSERT_MSG(false, "Unknown DataType.");
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index e03068618b1b482311558b4c95f4d9ac65c106e5..f7895a6f1da3302747491a1dfe3219ff9ead9669 100644
@@ -172,7 +172,7 @@ template <typename QueueDescriptor>
 using Float32Workload = TypedWorkload<QueueDescriptor, armnn::DataType::Float32>;
 
 template <typename QueueDescriptor>
-using Uint8Workload = TypedWorkload<QueueDescriptor, armnn::DataType::QuantisedAsymm8>;
+using Uint8Workload = TypedWorkload<QueueDescriptor, armnn::DataType::QAsymmU8>;
 
 template <typename QueueDescriptor>
 using Int32Workload = TypedWorkload<QueueDescriptor, armnn::DataType::Signed32>;
@@ -187,7 +187,7 @@ using BaseFloat32ComparisonWorkload = MultiTypedWorkload<QueueDescriptor,
 
 template <typename QueueDescriptor>
 using BaseUint8ComparisonWorkload = MultiTypedWorkload<QueueDescriptor,
-                                                       armnn::DataType::QuantisedAsymm8,
+                                                       armnn::DataType::QAsymmU8,
                                                        armnn::DataType::Boolean>;
 
 template <typename QueueDescriptor>
@@ -202,7 +202,7 @@ using Float32ToFloat16Workload = MultiTypedWorkload<QueueDescriptor,
 
 template <typename QueueDescriptor>
 using Uint8ToFloat32Workload = MultiTypedWorkload<QueueDescriptor,
-                                                  armnn::DataType::QuantisedAsymm8,
+                                                  armnn::DataType::QAsymmU8,
                                                   armnn::DataType::Float32>;
 
 } //namespace armnn
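
These aliases are what keeps the rename shallow on the backend side: workload classes are written against names like Uint8Workload, so only the DataType template argument of the alias changes. The idea reduced to a sketch (not armnn's actual definition):

    // A workload parameterised on the DataType it handles; renaming the
    // enumerator touches only the alias, never the workload classes.
    template <typename QueueDescriptor, armnn::DataType DT>
    struct TypedWorkloadSketch
    {
        static constexpr armnn::DataType m_DataType = DT;
        // ... descriptor storage, Execute(), type checks ...
    };

    template <typename QueueDescriptor>
    using Uint8WorkloadSketch = TypedWorkloadSketch<QueueDescriptor, armnn::DataType::QAsymmU8>;
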
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index c3dd601fbdfa8195e9933dc06529386ab3cdf227..fa5c6fe38e82f5f58d88278b5c9955b9182193bf 100644
@@ -30,9 +30,9 @@ DataType GetBiasDataType(DataType inputDataType)
             return DataType::Float16;
         case DataType::Float32:
             return DataType::Float32;
-        case DataType::QuantisedAsymm8:
+        case DataType::QAsymmU8:
             return DataType::Signed32;
-        case DataType::QuantisedSymm16:
+        case DataType::QSymmS16:
             return DataType::Signed32;
         default:
             BOOST_ASSERT_MSG(false, "Invalid input data type");
@@ -342,11 +342,11 @@ void ValidateWeightDataType(const TensorInfo& inputInfo,
                             const std::string& descName)
 {
     const DataType inputType = inputInfo.GetDataType();
-    if (inputType == DataType::QuantisedAsymm8)
+    if (inputType == DataType::QAsymmU8)
     {
         const std::vector<DataType> validTypes =
         {
-            DataType::QuantisedAsymm8,
+            DataType::QAsymmU8,
             DataType::QuantizedSymm8PerAxis
         };
 
@@ -403,7 +403,7 @@ void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
         const DataType outputDataType = outputInfo.GetDataType();
 
         const bool canHavePerAxisQuantization =
-            inputDataType == DataType::QuantisedAsymm8 && inputDataType == outputDataType;
+            inputDataType == DataType::QAsymmU8 && inputDataType == outputDataType;
 
         if (!canHavePerAxisQuantization)
         {
@@ -580,8 +580,8 @@ void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -608,8 +608,8 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16,
         DataType::Signed32
     };
 
@@ -665,8 +665,8 @@ void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -687,8 +687,8 @@ void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float16,
         DataType::Boolean,
         DataType::Signed32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
@@ -823,8 +823,8 @@ void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float16,
         DataType::Boolean,
         DataType::Signed32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
@@ -910,8 +910,8 @@ void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float16,
         DataType::Boolean,
         DataType::Signed32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
@@ -971,8 +971,8 @@ void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -994,8 +994,8 @@ void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1019,8 +1019,8 @@ void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     std::vector<DataType> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16,
         DataType::Float16
     };
 
@@ -1053,8 +1053,8 @@ void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     std::vector<DataType> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16,
         DataType::Float16
     };
 
@@ -1087,8 +1087,8 @@ void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInf
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
@@ -1159,8 +1159,8 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
     std::vector<DataType> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16,
         DataType::Float16
     };
 
@@ -1232,8 +1232,8 @@ void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
     std::vector<DataType> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16,
         DataType::Float16
     };
 
@@ -1287,8 +1287,8 @@ void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1312,8 +1312,8 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1359,8 +1359,8 @@ void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1460,8 +1460,8 @@ void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo)
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
@@ -1511,8 +1511,8 @@ void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float32,
         DataType::Float16,
         DataType::Signed32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
@@ -1536,8 +1536,8 @@ void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float32,
         DataType::Float16,
         DataType::Signed32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1601,8 +1601,8 @@ void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     {
             DataType::Float16,
             DataType::Float32,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
+            DataType::QAsymmU8,
+            DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -1626,8 +1626,8 @@ void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
@@ -1674,7 +1674,7 @@ void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedSymm16
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
@@ -1705,7 +1705,7 @@ void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedSymm16
+        DataType::QSymmS16
     };
 
     // check for supported type of one input and match them with all the other input and output
@@ -2016,8 +2016,8 @@ void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     std::vector<DataType> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16,
         DataType::Float16
     };
 
@@ -2047,8 +2047,8 @@ void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons
     std::vector<DataType> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16,
         DataType::Float16
     };
 
@@ -2080,8 +2080,8 @@ void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float16,
         DataType::Float32,
         DataType::Signed32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2110,8 +2110,8 @@ void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     // First check if input tensor data type is supported, then
@@ -2178,9 +2178,9 @@ void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
 
-    if (outputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
+    if (outputTensorInfo.GetDataType() != DataType::QAsymmU8 &&
         outputTensorInfo.GetDataType() != DataType::QSymmS8 &&
-        outputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
+        outputTensorInfo.GetDataType() != DataType::QSymmS16)
     {
         throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
     }
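
A minimal sketch of an output TensorInfo that passes the Quantize check above; the helper name, scale, and offset here are illustrative only, not values from this patch.

    armnn::TensorInfo MakeQuantizeOutputInfo(const armnn::TensorShape& shape)
    {
        // Any of QAsymmU8, QSymmS8, or QSymmS16 satisfies the check above.
        armnn::TensorInfo info(shape, armnn::DataType::QAsymmU8);
        info.SetQuantizationScale(1.0f / 255.0f); // illustrative scale
        info.SetQuantizationOffset(128);          // illustrative zero point
        return info;
    }
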
@@ -2200,8 +2200,8 @@ void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2222,8 +2222,8 @@ void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2279,8 +2279,8 @@ void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
         DataType::Float16,
         DataType::Float32,
         DataType::Signed32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2367,8 +2367,8 @@ void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2395,8 +2395,8 @@ void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2441,8 +2441,8 @@ void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadI
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
@@ -2531,8 +2531,8 @@ void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     std::vector<DataType> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
@@ -2574,8 +2574,8 @@ void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2636,8 +2636,8 @@ void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloa
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2662,17 +2662,17 @@ void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
 
     std::vector<DataType> inputOutputSupportedTypes =
     {
-        DataType::QuantisedAsymm8
+        DataType::QAsymmU8
     };
 
     std::vector<DataType> cellStateSupportedTypes =
     {
-        DataType::QuantisedSymm16
+        DataType::QSymmS16
     };
 
     std::vector<DataType> weightsSupportedTypes =
     {
-        DataType::QuantisedAsymm8
+        DataType::QAsymmU8
     };
 
     std::vector<DataType> biasSupportedTypes =
@@ -2831,8 +2831,8 @@ void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
@@ -2911,8 +2911,8 @@ void DepthToSpaceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) con
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     ValidateDataTypes(inputInfo,  supportedTypes, descriptorName);
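
Every hunk above (the WorkloadData.cpp Validate functions) edits the same idiom; condensed, with the new names (ValidateDataTypes and descriptorName are as used throughout that file):

    std::vector<armnn::DataType> supportedTypes =
    {
        armnn::DataType::Float32,
        armnn::DataType::Float16,
        armnn::DataType::QAsymmU8, // was QuantisedAsymm8
        armnn::DataType::QSymmS16  // was QuantisedSymm16
    };
    ValidateDataTypes(inputInfo, supportedTypes, descriptorName); // throws InvalidArgumentException on mismatch
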
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index a4327e441af3dc4cdc846c11f61591799314c780..54ae585a82e3907098fc21a8d4ea00f68e4e8ec2 100644
@@ -347,8 +347,8 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                         biasInfoPtr = &dummyFloat32Bias;
                         break;
                     }
-                    case DataType::QuantisedAsymm8:
-                    case DataType::QuantisedSymm16:
+                    case DataType::QAsymmU8:
+                    case DataType::QSymmS16:
                     {
                         biasInfoPtr = &dummyQA8Bias;
                         break;
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 1a45a9f6e95be25652c0dafcb164244bbb3772a7..cb1f7c117a89bc810fa47468b37ea777a34b2387 100644
@@ -164,7 +164,7 @@ armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle*
                 weightPermuted =
                     ReorderWeightChannelsForAcl<half_float::half>(weightPermuted, dataLayout, permuteBuffer);
                 break;
-            case DataType::QuantisedAsymm8:
+            case DataType::QAsymmU8:
                 weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer);
                 break;
             case DataType::QuantizedSymm8PerAxis:
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index d6f589fa0008289b1abab22979e880281d5356b6..4b9bf7a711d49482b37d26951cc139b48e277ad6 100644
@@ -88,7 +88,7 @@ inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
 
 inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
 {
-    TensorInfo commonTensorInfo({ 2, 3 }, DataType::QuantisedAsymm8);
+    TensorInfo commonTensorInfo({ 2, 3 }, DataType::QAsymmU8);
 
     const float scale = 0.023529f;
     const int8_t offset = -43;
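
For reference, how a float maps into QAsymmU8 storage given a scale and offset like those above; armnn::Quantize<uint8_t> in TypesUtils.hpp provides the real implementation, so QuantizeToU8 is only a sketch.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    uint8_t QuantizeToU8(float value, float scale, int32_t offset)
    {
        const int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
        return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q))); // clamp to payload range
    }
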
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 6924beb82075bc03958fbb52b8b0039bf766df56..031210f1fcb33b9eef642a7a0d3e0859c65ae536 100644
@@ -316,22 +316,22 @@ struct DummyLayer<armnn::QuantizedLstmLayer, void>
         m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
 
         m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights  = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
         m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
         m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights   = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
         m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
 
         m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights  = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
         m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
         m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
         m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
-                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
 
         m_Layer->m_QuantizedLstmParameters.m_InputGateBias  = std::make_unique<armnn::ScopedCpuTensorHandle>(
                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 9273a7910fa7272c991e0914822ec476b22568b3..850a4d37ee5bb4ec437fc9426de1dd608830f108 100644
@@ -147,12 +147,12 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
     softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
     // set the tensors in the network
-    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
     inputTensorInfo.SetQuantizationOffset(100);
     inputTensorInfo.SetQuantizationScale(10000.0f);
     input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
 
-    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
     outputTensorInfo.SetQuantizationOffset(0);
     outputTensorInfo.SetQuantizationScale(1.0f / 256.0f);
     softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
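
With the output quantization set above (offset 0, scale 1/256), the QAsymmU8 softmax output covers [0, 255/256] in steps of 1/256, a natural fit for probabilities; dequantization reduces to a single multiply (DequantizeSoftmaxOutput is an illustrative helper):

    float DequantizeSoftmaxOutput(uint8_t q)
    {
        return static_cast<float>(q) * (1.0f / 256.0f); // real = (q - offset) * scale, offset is 0
    }
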
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index 08f696812e5fbc032e436004f19cf19617b00bcc..eb1b97665694a37cf3758a186dd33d6ac5b2bbfb 100644
@@ -172,9 +172,9 @@ BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
     float outputQScale = 2.0f;
 
     layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
-                                                          DataType::QuantisedAsymm8, inputsQScale, 0));
+                                                          DataType::QAsymmU8, inputsQScale, 0));
     layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
-                                                          GetBiasDataType(DataType::QuantisedAsymm8), inputsQScale));
+                                                          GetBiasDataType(DataType::QAsymmU8), inputsQScale));
     layer->m_Weight->Allocate();
     layer->m_Bias->Allocate();
 
@@ -183,8 +183,8 @@ BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
     // connect up
-    Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QuantisedAsymm8, inputsQScale));
-    Connect(layer, output, TensorInfo({3, 7}, DataType::QuantisedAsymm8, outputQScale));
+    Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QAsymmU8, inputsQScale));
+    Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
 
     // check the constants that they are not NULL
     BOOST_CHECK(layer->m_Weight != nullptr);
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index ca3c56375740aca7aca45a1a8fc5a27676fb47ab..162cc8436c8e8131ebad8cd81f405357f8b33cbb 100644
@@ -207,8 +207,8 @@ void CheckUntouchedSubgraph(const SubgraphView& untouchedSubgraph,
// Creates a subgraph containing only a single unsupported layer (only convolutions are supported by the mock backend)
 SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
 {
-    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
+    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
 
     Pooling2dDescriptor poolingDescriptor;
     poolingDescriptor.m_PoolType      = armnn::PoolingAlgorithm::Average;
@@ -242,8 +242,8 @@ SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph1(Graph& graph, Layer
// Creates a subgraph containing only unsupported layers (only convolutions are supported by the mock backend)
 SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph)
 {
-    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
+    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
 
     Pooling2dDescriptor poolingDescriptor;
     poolingDescriptor.m_PoolType      = armnn::PoolingAlgorithm::Average;
@@ -285,9 +285,9 @@ SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph2(Graph& graph, Layer
 // Creates a simple subgraph with only one convolution layer, supported by the mock backend
 SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
 {
-    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QAsymmU8, 0.9f, 0);
     const TensorInfo biasInfo  ({  1,  1,  1, 16 }, DataType::Signed32,        0.9f, 0);
 
     Convolution2dDescriptor convolutionDescriptor;
@@ -315,9 +315,9 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph1(Graph& graph, Layer
 // Creates a subgraph with five convolutions layers, all supported by the mock backend
 SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph)
 {
-    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QAsymmU8, 0.9f, 0);
     const TensorInfo biasInfo  ({  1,  1,  1, 16 }, DataType::Signed32,        0.9f, 0);
 
     Convolution2dDescriptor convolutionDescriptor;
@@ -362,9 +362,9 @@ SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, Layer
// (only convolutions are supported by the mock backend)
 SubgraphView::SubgraphViewPtr BuildPartiallySupportedSubgraph(Graph& graph, LayerNameToLayerMap& layersInGraph)
 {
-    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QAsymmU8, 0.9f, 0);
     const TensorInfo biasInfo  ({  1,  1,  1, 16 }, DataType::Signed32,        0.9f, 0);
 
     Convolution2dDescriptor convolutionDescriptor;
@@ -421,9 +421,9 @@ SubgraphView::SubgraphViewPtr BuildPartiallySupportedSubgraph(Graph& graph, Laye
 // Creates a subgraph with only unoptimizable layers ("unoptimizable" is added to the layer's name)
 SubgraphView::SubgraphViewPtr BuildFullyUnoptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
 {
-    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QAsymmU8, 0.9f, 0);
     const TensorInfo biasInfo  ({  1,  1,  1, 16 }, DataType::Signed32,        0.9f, 0);
 
     Convolution2dDescriptor convolutionDescriptor;
@@ -452,9 +452,9 @@ SubgraphView::SubgraphViewPtr BuildFullyUnoptimizableSubgraph1(Graph& graph, Lay
 // Creates a subgraph with some unoptimizable layers ("unoptimizable" is added to the layer's name)
 SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
 {
-    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QAsymmU8, 0.9f, 0);
     const TensorInfo biasInfo  ({  1,  1,  1, 16 }, DataType::Signed32,        0.9f, 0);
 
     Convolution2dDescriptor convolutionDescriptor;
@@ -501,9 +501,9 @@ SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph1(Graph& graph, L
 // this is meant to test input slots coming from different layers
 SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph)
 {
-    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
-    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+    const TensorInfo inputInfo ({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo outputInfo({  1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+    const TensorInfo weightInfo({ 16,  1,  1, 16 }, DataType::QAsymmU8, 0.9f, 0);
     const TensorInfo biasInfo  ({  1,  1,  1, 16 }, DataType::Signed32,        0.9f, 0);
 
     Convolution2dDescriptor convolutionDescriptor;
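
All of the subgraph builders above use the four-argument TensorInfo constructor; its arguments, in order, are shape, data type, quantization scale, and quantization (zero-point) offset:

    const armnn::TensorInfo weightInfo({ 16, 1, 1, 16 },          // shape
                                       armnn::DataType::QAsymmU8, // payload type
                                       0.9f,                      // quantization scale
                                       0);                        // zero-point offset
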
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index 609773ce89e380acecd4ec8c5b009b83c742e655..cbba666004b580dfd74eaf80b6c93224e14e536c 100644
@@ -43,12 +43,12 @@ armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
     int32_t cellStateOffset = 0;
 
     armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
-                                       armnn::DataType::QuantisedAsymm8,
+                                       armnn::DataType::QAsymmU8,
                                        weightsScale,
                                        weightsOffset);
 
     armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
-                                           armnn::DataType::QuantisedAsymm8,
+                                           armnn::DataType::QAsymmU8,
                                            weightsScale,
                                            weightsOffset);
 
@@ -121,27 +121,27 @@ armnn::INetworkPtr CreateQuantizedLstmNetwork(MultiArray input,
     armnn::IConnectableLayer* const outputStateOut  = net->AddOutputLayer(1);
 
     armnn::TensorInfo inputTensorInfo({batchSize , inputSize},
-                                      armnn::DataType::QuantisedAsymm8,
+                                      armnn::DataType::QAsymmU8,
                                       inputOutputScale,
                                       inputOutputOffset);
 
     armnn::TensorInfo cellStateInTensorInfo({batchSize , outputSize},
-                                            armnn::DataType::QuantisedSymm16,
+                                            armnn::DataType::QSymmS16,
                                             cellStateScale,
                                             cellStateOffset);
 
     armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize},
-                                              armnn::DataType::QuantisedAsymm8,
+                                              armnn::DataType::QAsymmU8,
                                               inputOutputScale,
                                               inputOutputOffset);
 
     armnn::TensorInfo cellStateOutTensorInfo({batchSize, outputSize},
-                                             armnn::DataType::QuantisedSymm16,
+                                             armnn::DataType::QSymmS16,
                                              cellStateScale,
                                              cellStateOffset);
 
     armnn::TensorInfo outputTensorInfo({batchSize, outputSize},
-                                       armnn::DataType::QuantisedAsymm8,
+                                       armnn::DataType::QAsymmU8,
                                        inputOutputScale,
                                        inputOutputOffset);
 
@@ -178,23 +178,23 @@ IsCloseEnough(T value1, T value2, T tolerance)
 void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
 {
     std::vector<uint8_t> inputVector = {166, 179, 50, 150};
-    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
     boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, inputVector);
 
     std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036};
-    armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QuantisedSymm16);
+    armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QSymmS16);
     boost::multi_array<int16_t, 2> cellStateIn = MakeTensor<int16_t, 2>(cellStateInDesc, cellStateInVector);
 
     std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112};
-    armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QAsymmU8);
     boost::multi_array<uint8_t, 2> outputStateIn = MakeTensor<uint8_t, 2>(outputStateInDesc, outputStateInVector);
 
     std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
-    armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QuantisedSymm16);
+    armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QSymmS16);
     boost::multi_array<int16_t, 2> cellStateOut = MakeTensor<int16_t, 2>(cellStateOutVectorDesc, cellStateOutVector);
 
     std::vector<uint8_t> outputStateOutVector = {140, 151, 146, 112, 136, 156, 142, 112};
-    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
     boost::multi_array<uint8_t, 2> outputStateOut = MakeTensor<uint8_t, 2>(outputDesc, outputStateOutVector);
 
     // Builds up the structure of the network
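
To summarise the type mix this end-to-end test pins down: inputs, outputs, and weights are QAsymmU8; both cell-state tensors are QSymmS16; biases (as in the DummyLayer and descriptor hunks earlier) are Signed32. For example, the cell state is built as:

    armnn::TensorInfo cellStateInfo({batchSize, outputSize},
                                    armnn::DataType::QSymmS16, // symmetric scheme
                                    cellStateScale,
                                    cellStateOffset);          // 0 in this test
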
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index b5acd88e8980bedd642444685126188881162daa..3c47eab01f240ef260c17b31a6bffd667674a270 100644
@@ -615,7 +615,7 @@ BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
     const TensorShape weightShape{ cOutput, cInput,  hInput,  wInput  };
     const TensorShape biasShape  { cOutput                            };
 
-    constexpr DataType inputType  = DataType::QuantisedAsymm8;
+    constexpr DataType inputType  = DataType::QAsymmU8;
     constexpr DataType weightType = DataType::QuantizedSymm8PerAxis;
     constexpr DataType biasType   = DataType::Signed32;
 
diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
index 552eab2cae519e44107519b12047981d512646d9..0b0f265db43b3e6b23bcffa2ccea6e60be76bad6 100644
@@ -98,9 +98,9 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
         case armnn::DataType::Float16:
         case armnn::DataType::Float32:
             return weightsType;
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
             return armnn::DataType::Signed32;
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
             return armnn::DataType::Signed32;
         default:
             BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
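
A usage sketch of the helper above; both quantized weight types resolve to a Signed32 bias.

    armnn::Optional<armnn::DataType> biasType =
        GetBiasTypeFromWeightsType(armnn::Optional<armnn::DataType>(armnn::DataType::QAsymmU8));
    BOOST_ASSERT(biasType.value() == armnn::DataType::Signed32);
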
diff --git a/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
index ff76a38b08d4dbef60671e949420c94054be58e5..4ec20d87d79d2465a6aaa51f82e81fe9bdfb9b7d 100644
@@ -71,7 +71,7 @@ LayerTestResult<T, 2> Abs2dTest(
     float qScale    = 0.0625f;
     int32_t qOffset = 64;
 
-    if (ArmnnType == armnn::DataType::QuantisedSymm16)
+    if (ArmnnType == armnn::DataType::QSymmS16)
     {
         qScale  = 0.1f;
         qOffset = 0;
@@ -117,7 +117,7 @@ LayerTestResult<T, 3> Abs3dTest(
     float qScale    = 0.0625f;
     int32_t qOffset = 64;
 
-    if (ArmnnType == armnn::DataType::QuantisedSymm16)
+    if (ArmnnType == armnn::DataType::QSymmS16)
     {
         qScale  = 0.1f;
         qOffset = 0;
@@ -218,13 +218,13 @@ Abs2dTest<armnn::DataType::Float16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
-Abs2dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Abs2dTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Abs2dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Abs2dTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
@@ -238,13 +238,13 @@ Abs3dTest<armnn::DataType::Float16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3>
-Abs3dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Abs3dTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Abs3dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Abs3dTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
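
These instantiations rely on ResolveType mapping each enum value to its C++ payload type; under the new names (per src/armnn/ResolveType.hpp, also updated by this change):

    #include <cstdint>
    #include <type_traits>

    static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QAsymmU8>, uint8_t>::value,
                  "QAsymmU8 payload is uint8_t");
    static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QSymmS16>, int16_t>::value,
                  "QSymmS16 payload is int16_t");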
 
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 2f2d8dbd388e170f27a3093f9d0ffb796fbd02cf..87f42194b9256c987d4d29c1cbfd79946ab5a6f2 100644
@@ -176,7 +176,7 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
     float outputScale    = 6.0f / 255.0f;
     int32_t outputOffset = 0;
 
-    return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, 6.0f, 0.0f,
         inputScale, inputOffset, outputScale, outputOffset,
         input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
@@ -205,7 +205,7 @@ LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
     int32_t inputOffset = 112;
     float inputScale    = 0.0125f;
 
-    return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, 1.0f, -1.0f,
         inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
         input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
@@ -377,7 +377,7 @@ LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, 4.0f, 3);
 }
 
@@ -385,7 +385,7 @@ LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedSymm16>(
+    return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager, 0.1f, 0);
 }
 
@@ -508,14 +508,14 @@ LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
+    return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 50);
 }
 
 LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SimpleSigmoidTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+    return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -557,7 +557,7 @@ LayerTestResult<int16_t, 4> ReLuInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+    return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 
@@ -565,7 +565,7 @@ LayerTestResult<uint8_t, 4> ReLuUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 0);
+    return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 LayerTestResult<float, 4> ReLuTest(
@@ -616,7 +616,7 @@ LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+    return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 
@@ -667,14 +667,14 @@ LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SoftReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+    return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
 }
 
 LayerTestResult<int16_t, 4> SoftReLuInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SoftReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+    return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -724,14 +724,14 @@ LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return LeakyReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+    return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
 }
 
 LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return LeakyReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+    return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -780,14 +780,14 @@ LayerTestResult<uint8_t, 4> AbsUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return AbsTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+    return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
 }
 
 LayerTestResult<int16_t, 4> AbsInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return AbsTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+    return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 LayerTestResult<float, 5> SqrtNNTest(
@@ -892,14 +892,14 @@ LayerTestResult<uint8_t, 4> SqrtUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SqrtTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+    return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
 }
 
 LayerTestResult<int16_t, 4> SqrtInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SqrtTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+    return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -948,14 +948,14 @@ LayerTestResult<uint8_t, 4> SquareUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SquareTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+    return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
 }
 
 LayerTestResult<int16_t, 4> SquareInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SquareTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+    return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -1006,14 +1006,14 @@ LayerTestResult<uint8_t, 4> TanhUint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return TanhTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 64);
+    return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
 }
 
 LayerTestResult<int16_t, 4> TanhInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return TanhTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+    return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
 }
 
 
@@ -1130,7 +1130,7 @@ LayerTestResult<uint8_t,4> CompareActivationUint8Test(
     armnn::IWorkloadFactory& refWorkloadFactory,
     armnn::ActivationFunction f)
 {
-    return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
 }
 
@@ -1140,6 +1140,6 @@ LayerTestResult<int16_t,4> CompareActivationInt16Test(
         armnn::IWorkloadFactory& refWorkloadFactory,
         armnn::ActivationFunction f)
 {
-    return CompareActivationTestImpl<armnn::DataType::QuantisedSymm16>(
+    return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
 }
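
Note the pattern across all the Int16 variants above: QSymmS16 is symmetric, so its zero point is fixed at 0 and only a scale is passed, whereas the QAsymmU8 variants carry non-zero offsets (64, 50, 112, ...). Dequantization differs accordingly (both helpers are illustrative):

    float DequantizeS16(int16_t q, float scale)              { return q * scale; }          // offset always 0
    float DequantizeU8 (uint8_t q, float scale, int32_t off) { return (q - off) * scale; }
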
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index a3a21ab131352280d7cc08657bf5b0f83e09ff62..82dc59b66b3c8c01f96a8df9d964b8294e6f6639 100644
@@ -331,7 +331,7 @@ LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, 2.f, 0);
 }
 
@@ -339,7 +339,7 @@ LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
+    return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
         workloadFactory, memoryManager, 2.f, 0);
 }
 
@@ -355,7 +355,7 @@ LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.1333333f, 128);
 }
 
@@ -363,7 +363,7 @@ LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
+    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
         workloadFactory, memoryManager, 0.1333333f, 0);
 }
 
@@ -392,7 +392,7 @@ LayerTestResult<uint8_t, 4> AdditionUint8Test(
         255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
     });
 
-    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -434,7 +434,7 @@ LayerTestResult<int16_t, 4> AdditionInt16Test(
         329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
     };
 
-    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
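
The "(clamped)" annotations in the expected values above refer to saturation at the payload range after the addition; a hedged sketch of that clamp for the two renamed types (helper names are illustrative):

    #include <algorithm>
    #include <cstdint>

    uint8_t SaturateQAsymmU8(int32_t v) { return static_cast<uint8_t>(std::min<int32_t>(255,   std::max<int32_t>(0,      v))); }
    int16_t SaturateQSymmS16(int32_t v) { return static_cast<int16_t>(std::min<int32_t>(32767, std::max<int32_t>(-32768, v))); }
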
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index 2733100d6cec88689846881def1593f9020c893c..d63c179dfd85e0ce8866657e8a069f7ef4ffc944 100644
@@ -264,12 +264,12 @@ ArgMaxSimpleTest<armnn::DataType::Float32>(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMaxSimpleTest<armnn::DataType::QuantisedAsymm8>(
+ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMaxSimpleTest<armnn::DataType::QuantisedSymm16>(
+ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
@@ -284,12 +284,12 @@ ArgMinSimpleTest<armnn::DataType::Float32>(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMinSimpleTest<armnn::DataType::QuantisedAsymm8>(
+ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMinSimpleTest<armnn::DataType::QuantisedSymm16>(
+ArgMinSimpleTest<armnn::DataType::QSymmS16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
@@ -304,12 +304,12 @@ ArgMinChannelTest<armnn::DataType::Float32>(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMinChannelTest<armnn::DataType::QuantisedAsymm8>(
+ArgMinChannelTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMinChannelTest<armnn::DataType::QuantisedSymm16>(
+ArgMinChannelTest<armnn::DataType::QSymmS16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
@@ -324,12 +324,12 @@ ArgMaxChannelTest<armnn::DataType::Float32>(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMaxChannelTest<armnn::DataType::QuantisedAsymm8>(
+ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMaxChannelTest<armnn::DataType::QuantisedSymm16>(
+ArgMaxChannelTest<armnn::DataType::QSymmS16>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
@@ -349,7 +349,7 @@ ArgMaxHeightTest<armnn::DataType::Signed32>(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMaxHeightTest<armnn::DataType::QuantisedAsymm8>(
+ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
@@ -364,6 +364,6 @@ ArgMinWidthTest<armnn::DataType::Signed32>(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 template LayerTestResult<int32_t, 3>
-ArgMinWidthTest<armnn::DataType::QuantisedAsymm8>(
+ArgMinWidthTest<armnn::DataType::QAsymmU8>(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index 7857b3531b3e2bac23873af0fc5613efb6088fe8..7a55146b37ccdf69e15e59aa706b2ff77d4c0a4c 100644
@@ -432,7 +432,7 @@ LayerTestResult<uint8_t, 4> BatchNormUint8Test(
         2.f, 4.f
     };
 
-    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         inputOutputShape,
@@ -482,7 +482,7 @@ LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
         6.f, 4.f
     };
 
-    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         inputOutputShape, inputValues, expectedOutputValues,
@@ -524,7 +524,7 @@ LayerTestResult<int16_t, 4> BatchNormInt16Test(
         2.f, 4.f
     };
 
-    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
+    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         inputOutputShape,
@@ -574,7 +574,7 @@ LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
         6.f, 4.f
     };
 
-    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
+    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         inputOutputShape,
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index c8272f47f0f05aa34c83242e7fefdd76ba540e6c..7327536dff6b23d4d8bcb7d4c54724dbe028554d 100644
@@ -408,7 +408,7 @@ LayerTestResult<uint8_t, 4> EqualSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_SimpleTestData,
@@ -419,7 +419,7 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1ElementTestData,
@@ -430,7 +430,7 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1dVectorUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1dVectorTestData,
@@ -507,7 +507,7 @@ LayerTestResult<uint8_t, 4> GreaterSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_SimpleTestData,
@@ -518,7 +518,7 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1ElementTestData,
@@ -529,7 +529,7 @@ LayerTestResult<uint8_t, 4> GreaterBroadcast1dVectorUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1dVectorTestData,
@@ -607,7 +607,7 @@ LayerTestResult<uint8_t, 4> GreaterOrEqualSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_SimpleTestData,
@@ -618,7 +618,7 @@ LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1ElementUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1ElementTestData,
@@ -629,7 +629,7 @@ LayerTestResult<uint8_t, 4> GreaterOrEqualBroadcast1dVectorUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1dVectorTestData,
@@ -706,7 +706,7 @@ LayerTestResult<uint8_t, 4> LessSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_SimpleTestData,
@@ -717,7 +717,7 @@ LayerTestResult<uint8_t, 4> LessBroadcast1ElementUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1ElementTestData,
@@ -728,7 +728,7 @@ LayerTestResult<uint8_t, 4> LessBroadcast1dVectorUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1dVectorTestData,
@@ -806,7 +806,7 @@ LayerTestResult<uint8_t, 4> LessOrEqualSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_SimpleTestData,
@@ -817,7 +817,7 @@ LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1ElementUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1ElementTestData,
@@ -828,7 +828,7 @@ LayerTestResult<uint8_t, 4> LessOrEqualBroadcast1dVectorUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1dVectorTestData,
@@ -906,7 +906,7 @@ LayerTestResult<uint8_t, 4> NotEqualSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_SimpleTestData,
@@ -917,7 +917,7 @@ LayerTestResult<uint8_t, 4> NotEqualBroadcast1ElementUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1ElementTestData,
@@ -928,7 +928,7 @@ LayerTestResult<uint8_t, 4> NotEqualBroadcast1dVectorUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         s_Broadcast1dVectorTestData,
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 57ed7542d210a904a0013bb6e38e245a8526b2a3..29cd5ac5609ec845f2ea1e48b6139cc3c4f1ff92 100644
@@ -2054,14 +2054,14 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
 // Explicit template specializations
 //
 
-template LayerTestResult<ResolveType<DataType::QuantisedAsymm8>, 3>
-ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>(
+template LayerTestResult<ResolveType<DataType::QAsymmU8>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor);
 
-template LayerTestResult<ResolveType<DataType::QuantisedSymm16>, 3>
-ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>(
+template LayerTestResult<ResolveType<DataType::QSymmS16>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor);
@@ -2362,9 +2362,9 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
-    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
-    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
 
     // Quantized input1 tensor. Range [-3, 1]
     const float inputScale1 = 0.015686f;
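
The "Range [-3, 1]" comment above pins down where inputScale1 comes from: for an asymmetric unsigned 8-bit encoding, scale = (max - min) / 255, and (1 - (-3)) / 255 ≈ 0.015686, exactly the constant used. A quick check:

    #include <cstdio>

    int main()
    {
        const float minReal = -3.0f;
        const float maxReal =  1.0f;
        const float scale   = (maxReal - minReal) / 255.0f; // ≈ 0.015686f
        std::printf("QAsymmU8 scale for [-3, 1]: %f\n", scale);
        return 0;
    }
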
@@ -2507,9 +2507,9 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
-    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
-    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
 
     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
     const float scale = 0.13497836f;
@@ -2645,9 +2645,9 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedSymm16);
-    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedSymm16);
-    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedSymm16);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
 
     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
     const float scale = 0.13497836f;
@@ -2765,28 +2765,28 @@ LayerTestResult<uint8_t, 1> Concat1dUint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat1dTestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+    return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
@@ -2794,7 +2794,7 @@ LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+    return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
@@ -2802,14 +2802,14 @@ LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
@@ -2817,7 +2817,7 @@ LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2TestImpl<DataType::QuantisedAsymm8>(
+    return Concat3dDim2TestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
 }
 
@@ -2825,14 +2825,14 @@ LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+    return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
@@ -2841,7 +2841,7 @@ LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+    return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
 }
 
@@ -2849,28 +2849,28 @@ LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim2TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
 {
-    return Concat4dDim3TestImpl<DataType::QuantisedAsymm8>(
+    return Concat4dDim3TestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
 }
 
@@ -2878,7 +2878,7 @@ LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim0TestImpl<DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
@@ -2886,7 +2886,7 @@ LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim1TestImpl<DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
@@ -2894,7 +2894,7 @@ LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim2TestImpl<DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
@@ -2903,6 +2903,6 @@ LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat4dDiffShapeDim3TestImpl<DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
 }
index cfb62637d1fc3e413f8d7e4412da7b4373e3b5b4..35868405f19971dc0290786b376fc010523370ba 100644 (file)
@@ -134,26 +134,26 @@ LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+    return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
 LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+    return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
 LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
+    return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 2e-6f, 1);
 }
 
 LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
+    return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 2e-6f, 1);
 }
index a00fda7679e98ef5727c768240132cb24e1a66c4..055c9ab6e8ff33935477dea155dbef64f3975e72 100644 (file)
@@ -1018,13 +1018,13 @@ LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
     int32_t qOffset;
     switch (ArmnnType)
     {
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         {
             qScale = 0.1f;
             qOffset = 128;
             break;
         }
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
         {
             qScale = 0.1f;
             qOffset = 0;
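
The switch above is where the signed/unsigned distinction shows up in test setup: the unsigned asymmetric type gets a mid-range zero point (qOffset = 128) so that negative reals stay representable, while the signed symmetric type is centred on zero by construction (qOffset = 0). A minimal sketch of the two schemes, assuming the standard affine mapping q = round(real / scale) + offset; the function names are illustrative, not ArmNN API:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    std::uint8_t QuantizeQAsymmU8(float real, float scale, std::int32_t offset)
    {
        const std::int32_t q = static_cast<std::int32_t>(std::round(real / scale)) + offset;
        return static_cast<std::uint8_t>(std::min(255, std::max(0, q))); // clamp to [0, 255]
    }

    std::int16_t QuantizeQSymmS16(float real, float scale)
    {
        const std::int32_t q = static_cast<std::int32_t>(std::round(real / scale)); // offset fixed at 0
        return static_cast<std::int16_t>(std::min(32767, std::max(-32768, q)));     // clamp to [-32768, 32767]
    }
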
@@ -2304,13 +2304,13 @@ LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
     int32_t qOffset;
     switch (ArmnnType)
     {
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         {
             qScale = 0.1f;
             qOffset = 128;
             break;
         }
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
         {
             qScale = 0.1f;
             qOffset = 0;
@@ -2800,15 +2800,15 @@ Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float
     bool,
     armnn::DataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory&,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
     bool,
     armnn::DataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Convolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory&,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
     bool,
@@ -2821,15 +2821,15 @@ Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Flo
     bool,
     armnn::DataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory&,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
     bool,
     armnn::DataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory&,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
     bool,
@@ -2842,15 +2842,15 @@ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, arm
     bool biasEnabled,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory &workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
     bool biasEnabled,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory &workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
     bool biasEnabled,
@@ -2863,15 +2863,15 @@ DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataTy
         bool,
         armnn::DataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         armnn::IWorkloadFactory&,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
         bool,
         armnn::DataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         armnn::IWorkloadFactory&,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
         bool,
@@ -2884,15 +2884,15 @@ DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::Data
         bool,
         armnn::DataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         armnn::IWorkloadFactory&,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
         bool,
         armnn::DataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         armnn::IWorkloadFactory&,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
         bool,
@@ -2932,7 +2932,7 @@ LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
     bool biasEnabled,
     const armnn::DataLayout layout)
 {
-    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
 
@@ -2981,7 +2981,7 @@ LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
     bool biasEnabled,
     const armnn::DataLayout layout)
 {
-    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
 
@@ -2991,7 +2991,7 @@ LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
     bool biasEnabled,
     const armnn::DataLayout layout)
 {
-    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
 
@@ -3001,7 +3001,7 @@ LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
     bool biasEnabled,
     const armnn::DataLayout layout)
 {
-    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
             workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
 
@@ -3038,7 +3038,7 @@ LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled)
 {
-    return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
             workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
 }
 
@@ -3049,7 +3049,7 @@ LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
 {
     using namespace armnn;
 
-    const DataType inputType  = DataType::QuantisedAsymm8;
+    const DataType inputType  = DataType::QAsymmU8;
     const DataType kernelType = DataType::QuantizedSymm8PerAxis;
     const DataType biasType   = DataType::Signed32;
 
@@ -3220,7 +3220,7 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
     bool biasEnabled,
     const armnn::DataLayout layout)
 {
-    return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
 
@@ -3230,7 +3230,7 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
     bool biasEnabled,
     const armnn::DataLayout layout)
 {
-    return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
 
@@ -3252,7 +3252,7 @@ LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
         bool biasEnabled,
         const armnn::DataLayout layout)
 {
-    return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+    return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
 
@@ -3262,7 +3262,7 @@ LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
                 bool biasEnabled,
                 const armnn::DataLayout layout)
 {
-    return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+    return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
 }
 
@@ -3273,7 +3273,7 @@ LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
 {
     using namespace armnn;
 
-    const DataType inputType  = DataType::QuantisedAsymm8;
+    const DataType inputType  = DataType::QAsymmU8;
     const DataType kernelType = DataType::QuantizedSymm8PerAxis;
     const DataType biasType   = DataType::Signed32;
 
@@ -3390,6 +3390,6 @@ LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
     armnn::IWorkloadFactory& refWorkloadFactory,
     const armnn::DataLayout layout)
 {
-    return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, refWorkloadFactory, layout);
 }
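
Throughout the convolution tests above, QAsymmU8 and QSymmS16 inputs are paired with Signed32 bias tensors. The usual convention for affine-quantized convolutions, which these tests appear to follow (this is a sketch, not the ArmNN validation code), is that the bias scale is the product of the input and weight scales, with a zero offset:

    #include <cmath>
    #include <cstdint>

    // Hypothetical helper illustrating the common bias rule:
    // biasScale = inputScale * weightScale, offset = 0, stored as int32.
    std::int32_t QuantizeBias(float bias, float inputScale, float weightScale)
    {
        const float biasScale = inputScale * weightScale;
        return static_cast<std::int32_t>(std::round(bias / biasScale));
    }
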
index a4db5686b6f03bd7b8a1268e0cbad9f0f7f9da60..eef8372addf1da0df83a91c4360877084af16b31 100644 (file)
@@ -314,54 +314,54 @@ LayerTestResult<uint8_t, 4> Debug4dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Debug4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return Debug4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 3> Debug3dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Debug3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return Debug3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 2> Debug2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Debug2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return Debug2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 1> Debug1dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Debug1dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return Debug1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> Debug4dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Debug4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return Debug4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 3> Debug3dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Debug3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return Debug3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 2> Debug2dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Debug2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return Debug2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 1> Debug1dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Debug1dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return Debug1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
index 5e5cba349ef21436315b9d112e90f0813c7baba0..96fa24a0cbfa9c94060f611835f14c7519b4efd6 100644 (file)
@@ -284,51 +284,51 @@ DepthToSpaceTest4<armnn::DataType::Float16>(
     armnn::DataLayout dataLayout);
 
-// QuantisedAsymm8
+// QAsymmU8
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest1<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest1<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest2<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest2<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest3<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest3<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest4<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest4<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::DataLayout dataLayout);
 
-// QuantisedSymm16
+// QSymmS16
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest1<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest1<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest2<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest2<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest3<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest3<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest4<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest4<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::DataLayout dataLayout);
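
The instantiations above lean on armnn::ResolveType to turn a DataType enumerator into its C++ payload type, which is precisely the ambiguity this rename resolves: QAsymmU8 is explicitly an unsigned 8-bit payload and QSymmS16 a signed 16-bit one. A self-contained sketch of the idiom, using a stand-in enum rather than the real armnn::DataType:

    #include <cstdint>
    #include <type_traits>

    enum class DataType { QAsymmU8, QSymmS16 }; // stand-in, not armnn::DataType

    template <DataType DT> struct ResolveTypeImpl;
    template <> struct ResolveTypeImpl<DataType::QAsymmU8> { using Type = std::uint8_t; };
    template <> struct ResolveTypeImpl<DataType::QSymmS16> { using Type = std::int16_t; };

    template <DataType DT>
    using ResolveType = typename ResolveTypeImpl<DT>::Type;

    static_assert(std::is_same<ResolveType<DataType::QAsymmU8>, std::uint8_t>::value,
                  "QAsymmU8 carries an unsigned 8-bit payload");
    static_assert(std::is_same<ResolveType<DataType::QSymmS16>, std::int16_t>::value,
                  "QSymmS16 carries a signed 16-bit payload");
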
index fb225aeb5406b12f3e4ca1ad0ad34e5a3f075863..7a757d59eed027a2c68bceee8cc9eb334568dfee 100644 (file)
@@ -134,14 +134,14 @@ LayerTestResult<float, 4> DequantizeSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return DequantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 4> DequantizeOffsetUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return DequantizeOffsetTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 4> DequantizeSimpleInt8Test(
@@ -155,14 +155,14 @@ LayerTestResult<float, 4> DequantizeSimpleInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return DequantizeSimpleTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<armnn::Half, 4> DequantizeSimpleUint8ToFp16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float16>(workloadFactory,
-                                                                                            memoryManager);
+    return DequantizeSimpleTest<armnn::DataType::QAsymmU8, armnn::DataType::Float16>(workloadFactory,
+                                                                                     memoryManager);
 }
 
@@ -177,6 +177,6 @@ LayerTestResult<armnn::Half, 4> DequantizeSimpleInt16ToFp16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Float16>(workloadFactory,
-                                                                                            memoryManager);
+    return DequantizeSimpleTest<armnn::DataType::QSymmS16, armnn::DataType::Float16>(workloadFactory,
+                                                                                     memoryManager);
 }
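
The Dequantize tests above exercise the inverse mapping, including the QAsymmU8-to-Float16 path. Under the same affine scheme as before, real = scale * (q - offset); for the symmetric QSymmS16 variant the offset term is always zero. A sketch (illustrative helper, not ArmNN's implementation):

    #include <cstdint>

    float DequantizeQAsymmU8(std::uint8_t q, float scale, std::int32_t offset)
    {
        return scale * static_cast<float>(static_cast<std::int32_t>(q) - offset);
    }
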
index b908f96e9fde0b575fb0a540666daa04bb0a1f65..223beb49e832434712abb11e365a35bf75b4d57b 100644 (file)
@@ -288,7 +288,7 @@ LayerTestResult<uint8_t, 4> DivisionUint8Test(
         4, 4, 4, 4,  5, 5, 5, 5
     };
 
-    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape,
@@ -314,7 +314,7 @@ LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
 
     std::vector<uint8_t> output = { 1, 2, 3, 4, 5, 6, 7, 8};
 
-    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -348,7 +348,7 @@ LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
        13, 14,   15, 16,   17, 18
     };
 
-    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -383,7 +383,7 @@ LayerTestResult<int16_t,4> DivisionInt16Test(
         4, 4, 4, 4,  5, 5, 5, 5
     };
 
-    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape,
@@ -409,7 +409,7 @@ LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
 
     std::vector<int16_t> output = { 1, 2, 3, 4, 5, 6, 7, 8};
 
-    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -443,7 +443,7 @@ LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
         13, 14,   15, 16,   17, 18
     };
 
-    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
index a3d29dac71110d1cd0661a62cea93e87d93641ab..ebad7fc91c77e91185fed42b5cc6ea4db350a157 100644 (file)
@@ -71,7 +71,7 @@ SimpleFloorTest<armnn::DataType::Float16>(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleFloorTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleFloorTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
index a3fe8582c865ebb326e181bf7cded90b2ddcda4b..34bd9ec75e1ef87a5c99d19b0e8faab049918de7 100644 (file)
@@ -229,14 +229,14 @@ LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
 // Explicit template specializations
 //
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
-FullyConnectedTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+FullyConnectedTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-FullyConnectedTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+FullyConnectedTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled);
index 1ccf51c7d2cfc50f60ecd927e2cddc9cac1b4993..c6f58057c58d30b925ab7cc6502e86d82c2c23ac 100644 (file)
@@ -315,14 +315,14 @@ LayerTestResult<uint8_t, 1> Gather1dParamsUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+    return GatherTestHelper<armnn::DataType::QAsymmU8>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 1> Gather1dParamsInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return GatherTestHelper<armnn::DataType::QuantisedSymm16>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+    return GatherTestHelper<armnn::DataType::QSymmS16>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 2> GatherMultiDimParamsFloat32Test(
@@ -343,7 +343,7 @@ LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::GatherMultiDimParamsTestImpl(
+    return GatherTestHelper<armnn::DataType::QAsymmU8>::GatherMultiDimParamsTestImpl(
         workloadFactory, memoryManager);
 }
 
@@ -351,7 +351,7 @@ LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return GatherTestHelper<armnn::DataType::QuantisedSymm16>::GatherMultiDimParamsTestImpl(
+    return GatherTestHelper<armnn::DataType::QSymmS16>::GatherMultiDimParamsTestImpl(
         workloadFactory, memoryManager);
 }
 
@@ -375,7 +375,7 @@ LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::GatherMultiDimParamsMultiDimIndicesTestImpl(
+    return GatherTestHelper<armnn::DataType::QAsymmU8>::GatherMultiDimParamsMultiDimIndicesTestImpl(
         workloadFactory, memoryManager);
 }
 
@@ -383,6 +383,6 @@ LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return GatherTestHelper<armnn::DataType::QuantisedSymm16>::GatherMultiDimParamsMultiDimIndicesTestImpl(
+    return GatherTestHelper<armnn::DataType::QSymmS16>::GatherMultiDimParamsMultiDimIndicesTestImpl(
         workloadFactory, memoryManager);
 }
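
Gather is payload-agnostic: elements are copied verbatim with no requantization, which is why the same test helpers serve both the uint8_t (QAsymmU8) and int16_t (QSymmS16) instantiations above. A minimal 1-D sketch:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Copies quantized elements without touching scale or offset.
    template <typename T>
    std::vector<T> Gather1d(const std::vector<T>& params, const std::vector<int>& indices)
    {
        std::vector<T> out;
        out.reserve(indices.size());
        for (int i : indices)
        {
            out.push_back(params[static_cast<std::size_t>(i)]);
        }
        return out;
    }
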
index 4b16921990f818c8a7ca416991acfb88ccb4d2c6..e500a126f6359fa589aab3e0851d2f67e0511d5c 100644 (file)
@@ -599,7 +599,7 @@ LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout)
 {
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+    return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         1.f,
@@ -614,7 +614,7 @@ LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout)
 {
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         1.f,
@@ -644,7 +644,7 @@ LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout)
 {
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+    return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         1.f,
@@ -659,7 +659,7 @@ LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout)
 {
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         1.f,
@@ -748,7 +748,7 @@ LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout)
 {
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+    return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         1.f,
@@ -763,7 +763,7 @@ LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout)
 {
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         1.f,
@@ -793,7 +793,7 @@ LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout)
 {
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+    return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         1.f,
@@ -808,7 +808,7 @@ LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout)
 {
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         1.f,
index e755aa54cb0803005f585df8ad5a0e238a52070c..c61a0526a1ae1913e9c99854252d2c710138a785 100644 (file)
@@ -1573,17 +1573,17 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
 
     // Input/Output tensor info
     armnn::TensorInfo inputInfo({numBatches , inputSize},
-                                 armnn::DataType::QuantisedAsymm8,
+                                 armnn::DataType::QAsymmU8,
                                  inputOutputScale,
                                  inputOutputOffset);
 
     armnn::TensorInfo cellStateInfo({numBatches , outputSize},
-                                     armnn::DataType::QuantisedSymm16,
+                                     armnn::DataType::QSymmS16,
                                      cellStateScale,
                                      cellStateOffset);
 
     armnn::TensorInfo outputStateInfo({numBatches , outputSize},
-                                       armnn::DataType::QuantisedAsymm8,
+                                       armnn::DataType::QAsymmU8,
                                        inputOutputScale,
                                        inputOutputOffset);
 
@@ -1635,12 +1635,12 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
 
     // Weights and bias tensor and quantization info
     armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
-                                        armnn::DataType::QuantisedAsymm8,
+                                        armnn::DataType::QAsymmU8,
                                         weightsScale,
                                         weightsOffset);
 
     armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
-                                            armnn::DataType::QuantisedAsymm8,
+                                            armnn::DataType::QAsymmU8,
                                             weightsScale,
                                             weightsOffset);
 
@@ -1965,8 +1965,8 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
     const float qScale = 1.0f;
     const int32_t qOffset = 0;
 
-    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
-    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
+    const armnn::DataType datatype = armnn::DataType::QSymmS16;
+    const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
 
     armnn::TensorInfo inputDesc({2, 2}, datatype);
     boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(
@@ -1995,8 +1995,8 @@ LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
     const float qScale = 1.0f;
     const int32_t qOffset = 0;
 
-    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
-    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
+    const armnn::DataType datatype = armnn::DataType::QSymmS16;
+    const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
 
     armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
     boost::multi_array<int16_t, 2> input =
@@ -2026,8 +2026,8 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
     const float qScale = 2.0f;
     const int32_t qOffset = 0;
 
-    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
-    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
+    const armnn::DataType datatype = armnn::DataType::QSymmS16;
+    const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
 
     armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
     boost::multi_array<int16_t, 2> input =
@@ -2068,7 +2068,7 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16Const
     const float qScale = 1.0f;
     const int32_t qOffset = 0;
 
-    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
+    const armnn::DataType datatype = armnn::DataType::QSymmS16; // datatype & constants set to QSymmS16
 
     armnn::TensorInfo inputDesc({2, 2}, datatype);
     boost::multi_array<int16_t , 2> input =
@@ -2098,11 +2098,11 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
     boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
         {166, 179, 50, 150}));
 
-    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
     boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
         {140, 151, 146, 112, 136, 156, 142, 112 }));
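
The QuantizedLstm setup above is a good illustration of why distinct signed/unsigned names matter: activations and weights travel as QAsymmU8, while the cell state keeps a signed symmetric 16-bit encoding. A sketch of the cell-state descriptor using the same four-argument TensorInfo constructor as the test; the scale value here is an assumed example, not taken from the test:

    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    armnn::TensorInfo MakeCellStateInfo(unsigned int numBatches, unsigned int outputSize)
    {
        return armnn::TensorInfo({ numBatches, outputSize },
                                 armnn::DataType::QSymmS16,
                                 1.0f / 2048.0f, // assumed example scale
                                 0);             // symmetric encoding: offset is zero
    }
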
 
index 021869703818c4aae3d46d1617bfc8d0c827977c..5147cffddb638cb9e13b614323929accd2112de1 100644 (file)
@@ -238,7 +238,7 @@ LayerTestResult<uint8_t, 4> MaximumUint8Test(
         4, 4, 4, 4, 5, 5, 5, 5
     };
 
-    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape,
@@ -270,7 +270,7 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
         7, 8, 9, 10, 11, 12
     };
 
-    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -301,7 +301,7 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
         7, 10, 9, 10, 11, 12
     };
 
-    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -327,7 +327,7 @@ LayerTestResult<int16_t, 4> MaximumInt16Test(
     std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
                                   4, 4, 4, 4, 5, 5, 5, 5 });
 
-    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape,
@@ -359,7 +359,7 @@ LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
         7, 8, 9, 10, 11, 12
     };
 
-    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -391,7 +391,7 @@ LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
         7, 10, 9, 10, 11, 12
     };
 
-    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
index ed12c7fa2cb755eea901e0fa711e074af553c376..a0a402911503001e25147dd14ae5dcb991416b4c 100644 (file)
@@ -86,7 +86,7 @@ LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
         1, 1, 2, 1, 2, 3
     };
 
-    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -226,7 +226,7 @@ LayerTestResult<int16_t, 4> MinimumInt16Test(
         3, 3, 3, 3, 4, 4, 4, 4
     };
 
-    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape,
@@ -258,7 +258,7 @@ LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
         2, 2, 2, 2, 2, 2
     };
 
-    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -290,7 +290,7 @@ LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
         1, 8, 3, 1, 10, 3
     };
 
-    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
index a39e6bd8272e2a6adb0604e1164a51e749cb18eb..d32e0cf89bca584e96bd7ad9dd84b28fe9ed817d 100644 (file)
@@ -216,7 +216,7 @@ LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
     };
 
     // Scale/offset chosen to have output values out of range
-    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape,
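
The comment above ("Scale/offset chosen to have output values out of range") targets the requantization step: the real-valued product is mapped into the output's quantization parameters and saturated to the QAsymmU8 range. A sketch of that step, assuming the standard affine scheme and shared input parameters; the helper name is illustrative only:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    std::uint8_t MultiplyRequantized(std::uint8_t a, std::uint8_t b,
                                     float inScale, std::int32_t inOffset,
                                     float outScale, std::int32_t outOffset)
    {
        const float realA = inScale * static_cast<float>(static_cast<std::int32_t>(a) - inOffset);
        const float realB = inScale * static_cast<float>(static_cast<std::int32_t>(b) - inOffset);
        const std::int32_t q =
            static_cast<std::int32_t>(std::round(realA * realB / outScale)) + outOffset;
        return static_cast<std::uint8_t>(std::min(255, std::max(0, q))); // saturate
    }
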
@@ -254,7 +254,7 @@ LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
         14, 16, 18,   20, 22, 24
     };
 
-    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -286,7 +286,7 @@ LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
         7, 16,  27,    10, 22, 36
     };
 
-    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -321,7 +321,7 @@ LayerTestResult<int16_t, 4> MultiplicationInt16Test(
         84, 104, 126, 150, 176, 204
     };
 
-    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape,
@@ -353,7 +353,7 @@ LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
         14, 16, 18, 20, 22, 24
     };
 
-    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -385,7 +385,7 @@ LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
         7, 16, 27, 10, 22, 36
     };
 
-    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
index 3a8d2b7bbf32c7bde8f364b2af0d79020a8a52b8..9239c665ebe60b6364c811ec4b597afa750d0c09 100644 (file)
@@ -416,23 +416,23 @@ LayerTestResult<T, 4> Pad4dTestCommon(
 // Explicit template specializations
 //
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Pad2dTestCommon<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset,
     const float customPaddingValue);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Pad3dTestCommon<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Pad4dTestCommon<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
@@ -446,28 +446,28 @@ LayerTestResult<uint8_t, 2> PadUint82dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+    return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
 LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
+    return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
 }
 
 LayerTestResult<uint8_t, 3> PadUint83dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+    return Pad3dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
 LayerTestResult<uint8_t, 4> PadUint84dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+    return Pad4dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
 LayerTestResult<float, 2> PadFloat322dTest(
index 160e6582d50bde60f765dec0761837bfcd9b601e..b58e9826b86432dd39c6cd9a6fbccef9e1e03e61 100644 (file)
@@ -1366,7 +1366,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool forceNoPadding)
 {
-    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
+    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
 }
 
@@ -1375,7 +1375,7 @@ LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool forceNoPadding)
 {
-    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
+    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager, forceNoPadding);
 }
 
@@ -1393,7 +1393,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool forceNoPadding)
 {
-    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
+    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
 }
 
@@ -1402,7 +1402,7 @@ LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool forceNoPadding)
 {
-    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
+    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager, forceNoPadding);
 }
 
@@ -1419,7 +1419,7 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
+    return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
 }
 
 LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
@@ -1427,7 +1427,7 @@ LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
+    return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
 }
 LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
     armnn::IWorkloadFactory& workloadFactory,
@@ -1440,7 +1440,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
             workloadFactory, memoryManager, 1.0f, -5);
 }
 
@@ -1448,7 +1448,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager);
 }
 
@@ -1463,7 +1463,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
             workloadFactory, memoryManager, 1.0f, -5);
 }
 
@@ -1471,7 +1471,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
+    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager);
 }
 
@@ -1488,7 +1488,7 @@ LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, dataLayout, 0.5, -1);
 }
 
@@ -1497,7 +1497,7 @@ LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+    return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager, dataLayout);
 }
 
@@ -1521,7 +1521,7 @@ LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, 0.5, -1);
 }
 
@@ -1529,7 +1529,7 @@ LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager);
 }
 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
@@ -1543,7 +1543,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
             workloadFactory, memoryManager);
 }
 
@@ -1551,7 +1551,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager);
 }
 
@@ -1567,7 +1567,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Tes
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
             workloadFactory, memoryManager);
 }
 
@@ -1575,7 +1575,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Tes
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
+    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager);
 }
 
@@ -1590,7 +1590,7 @@ LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
             workloadFactory, memoryManager);
 }
 
@@ -1598,7 +1598,7 @@ LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
+    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager);
 }
 
@@ -1615,7 +1615,7 @@ LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
+    return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
 }
 
 LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
@@ -1623,7 +1623,7 @@ LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
+    return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
 }
 
 LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
@@ -1637,14 +1637,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
@@ -1658,14 +1658,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
     armnn::IWorkloadFactory& workloadFactory,
@@ -1678,14 +1678,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 4> L2Pooling2dSize7Test(
@@ -1699,14 +1699,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 4> L2Pooling2dSize9Test(
@@ -1720,14 +1720,14 @@ LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
     armnn::IWorkloadFactory& workloadFactory,
@@ -1740,14 +1740,14 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
@@ -1761,14 +1761,14 @@ LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
@@ -1782,14 +1782,14 @@ LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 4> ComparePooling2dTest(
@@ -1808,7 +1808,7 @@ LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
     armnn::IWorkloadFactory& refWorkloadFactory,
     armnn::PoolingAlgorithm  poolingType)
 {
-    return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+    return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
 }
 
@@ -1818,6 +1818,6 @@ LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
     armnn::IWorkloadFactory& refWorkloadFactory,
     armnn::PoolingAlgorithm  poolingType)
 {
-    return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+    return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
             workloadFactory, memoryManager, refWorkloadFactory, poolingType);
 }
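
The pooling wrappers pass explicit quantization parameters to the shared implementations (for example scale 0.1f with offset 128, or scale 1.0f with offset -5, for QAsymmU8, while the QSymmS16 variants mostly take defaults). As a standalone sketch of what those parameters mean under the two renamed schemes, using only the standard library and none of ArmNN's own quantize helpers:

// Standalone sketch of the arithmetic behind the two renamed schemes.
// QAsymmU8: unsigned 8-bit payload with a zero-point offset.
// QSymmS16: signed 16-bit payload, symmetric about zero (offset fixed at 0).
#include <cmath>
#include <cstdint>

uint8_t QuantizeAsymmU8(float value, float scale, int32_t offset)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    if (q < 0)   { q = 0; }
    if (q > 255) { q = 255; }
    return static_cast<uint8_t>(q);
}

int16_t QuantizeSymmS16(float value, float scale)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale));
    if (q < -32768) { q = -32768; }
    if (q >  32767) { q =  32767; }
    return static_cast<int16_t>(q);
}
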
index e23f92a5a9aece42fd0e77745d94f103216269f1..ab6a35b16f8e3afd811181d0294c2fc65ae81b6b 100644 (file)
@@ -130,14 +130,14 @@ LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
@@ -151,5 +151,5 @@ LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
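
Each wrapper fixes the DataType template argument, and the element type of the returned LayerTestResult (uint8_t, int8_t or int16_t) must agree with it. A hedged usage sketch of the renamed enumerators with TensorInfo, assuming the usual armnn/Tensor.hpp constructor taking (shape, dataType, quantizationScale, quantizationOffset):

// Usage sketch only; assumes the standard armnn/Tensor.hpp API.
#include <armnn/Tensor.hpp>

armnn::TensorInfo MakeQAsymmU8Info()
{
    // Unsigned asymmetric 8-bit: a scale plus an explicit zero point.
    return armnn::TensorInfo({ 1, 4 }, armnn::DataType::QAsymmU8, 0.5f, 128);
}

armnn::TensorInfo MakeQSymmS16Info()
{
    // Signed symmetric 16-bit: scale only, zero point pinned to 0.
    return armnn::TensorInfo({ 1, 4 }, armnn::DataType::QSymmS16, 0.25f, 0);
}
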
index 485e7eab80fc4a17225336b947a7434572767a09..894ece65a5c2585a158a49bba19b5d04fc96cc05 100644 (file)
@@ -176,13 +176,13 @@ SimpleReshapeTest<armnn::DataType::Float32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleReshapeTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleReshapeTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleReshapeTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
@@ -191,12 +191,12 @@ Reshape5dTest<armnn::DataType::Float32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 5>
-Reshape5dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 5>
+Reshape5dTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 5>
-Reshape5dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 5>
+Reshape5dTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
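
The explicit instantiations above pair each enumerator with its payload type through armnn::ResolveType, which is how the uint8_t and int16_t result types line up with QAsymmU8 and QSymmS16. A simplified, self-contained reconstruction of that mapping (illustrative only, not the actual header):

// Simplified reconstruction of the enum-to-payload mapping; illustrative only.
#include <cstdint>

enum class DataType { QAsymmU8, QSymmS16 };  // reduced stand-in for armnn::DataType

template <DataType DT> struct ResolveTypeImpl;
template <> struct ResolveTypeImpl<DataType::QAsymmU8> { using Type = uint8_t; };
template <> struct ResolveTypeImpl<DataType::QSymmS16> { using Type = int16_t; };

template <DataType DT>
using ResolveType = typename ResolveTypeImpl<DT>::Type;

static_assert(sizeof(ResolveType<DataType::QAsymmU8>) == 1, "QAsymmU8 carries uint8_t");
static_assert(sizeof(ResolveType<DataType::QSymmS16>) == 2, "QSymmS16 carries int16_t");
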
index 080155eebf6908aa1513778ba7786143aafab549..0389e82c7aec35702f98956b73969c2bc6007ab9 100644 (file)
@@ -664,62 +664,62 @@ ResizeNearestNeighborMagTest<armnn::DataType::Float16>(
     int32_t outQuantOffset);
 
 // QAsymm8
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearNopTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearMinTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearMagTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout,
@@ -729,62 +729,62 @@ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>(
     int32_t outQuantOffset);
 
 // QSymm16
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearNopTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearNopTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleResizeBilinearTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearSqMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearMinTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearMagTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearMagTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborMagTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout,
index 24a3b21e96074bdc35f7d4a89550d72e63d55857..4107e13eb8e1b957782204746038c65c98e1ea28 100644 (file)
@@ -227,13 +227,13 @@ Rsqrt2dTest<armnn::DataType::Float16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
-Rsqrt2dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Rsqrt2dTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Rsqrt2dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Rsqrt2dTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
@@ -247,13 +247,13 @@ Rsqrt3dTest<armnn::DataType::Float16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3>
-Rsqrt3dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Rsqrt3dTest<armnn::DataType::QAsymmU8>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Rsqrt3dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Rsqrt3dTest<armnn::DataType::QSymmS16>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
index 65b17164f32f136238b80b3889ebaff6356434ae..fc78074a4365bedb018d6a521094c72343f9221c 100644 (file)
@@ -246,48 +246,48 @@ LayerTestResult<float, 1> Slice1dFloat32Test(armnn::IWorkloadFactory& workloadFa
 LayerTestResult<uint8_t, 4> Slice4dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Slice4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return Slice4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 3> Slice3dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Slice3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return Slice3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 2> Slice2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Slice2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return Slice2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 1> Slice1dUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Slice1dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return Slice1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 // Int16 tests
 LayerTestResult<int16_t, 4> Slice4dInt16Test(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Slice4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return Slice4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 3> Slice3dInt16Test(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Slice3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return Slice3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 2> Slice2dInt16Test(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Slice2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return Slice2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 1> Slice1dInt16Test(armnn::IWorkloadFactory& workloadFactory,
                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Slice1dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return Slice1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
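
Every call site in these hunks moves to the new spellings in one pass. Renames like this are commonly cushioned for out-of-tree code by keeping the old enumerators as deprecated aliases; the sketch below shows that pattern in standard C++ ([[deprecated]] on an enumerator is valid from C++17). Whether this change retains such aliases is not visible in these hunks, and the sketch is illustrative:

// Hypothetical back-compat pattern for an enum rename; illustrative, not
// confirmed from this diff.
enum class DataType
{
    QAsymmU8,
    QSymmS16,

    // Old spellings kept as aliases so existing callers still compile (with a warning).
    QuantisedAsymm8 [[deprecated("Use QAsymmU8 instead")]] = QAsymmU8,
    QuantisedSymm16 [[deprecated("Use QSymmS16 instead")]] = QSymmS16,
};
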
index 2a1aa76fce18bea8c37fea4f91ce6908ee580ed5..4147cc85167d6aef63b6f1b3bcd1271399de3606 100644 (file)
@@ -602,7 +602,7 @@ LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float beta)
 {
-    return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
+    return SimpleSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta);
 }
 
 LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
@@ -611,7 +611,7 @@ LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
         float beta)
 {
     Simple3dSoftmaxOutputData data;
-    return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Simple3dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         beta,
@@ -627,7 +627,7 @@ LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
 {
     Simple4dSoftmaxData data;
 
-    return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
-                                                                     data.inputShape, data.outputData, data.inputData);
+    return Simple4dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta,
+                                                              data.inputShape, data.outputData, data.inputData);
 }
 
@@ -664,7 +664,7 @@ LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         float beta)
 {
-    return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
+    return SimpleSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta);
 }
 
 LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
@@ -673,7 +673,7 @@ LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
         float beta)
 {
     Simple3dSoftmaxOutputData data;
-    return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
-                                                                     data.inputShape, data.outputData, data.inputData);
+    return Simple3dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
+                                                              data.inputShape, data.outputData, data.inputData);
 }
 
@@ -684,7 +684,7 @@ LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
 {
     Simple4dSoftmaxData data;
 
-    return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
-                                                                     data.inputShape, data.outputData, data.inputData);
+    return Simple4dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
+                                                              data.inputShape, data.outputData, data.inputData);
 }
 
@@ -704,6 +704,6 @@ LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
     armnn::IWorkloadFactory& refWorkloadFactory,
     float beta)
 {
-    return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return CompareSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
         workloadFactory, memoryManager, refWorkloadFactory, beta);
 }
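
All of the softmax wrappers thread a beta parameter through to the templated implementation. For reference, a minimal float softmax-with-beta sketch; conceptually the quantized variants apply this to dequantized values and requantize the result (a simplification, not the reference backend's code):

// Minimal softmax-with-beta sketch; a simplification for reference only.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> SoftmaxWithBeta(const std::vector<float>& input, float beta)
{
    if (input.empty()) { return {}; }

    // Subtract the max for numerical stability before exponentiating.
    const float maxElement = *std::max_element(input.begin(), input.end());

    std::vector<float> output(input.size());
    float sum = 0.0f;
    for (std::size_t i = 0; i < input.size(); ++i)
    {
        output[i] = std::exp(beta * (input[i] - maxElement));
        sum += output[i];
    }
    for (float& v : output)
    {
        v /= sum;
    }
    return output;
}
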
index 2793875c5b0af4fd2123b012bce34258e390edfd..afb47967039ff2ab5fa479d218906779cf19bf15 100644 (file)
@@ -337,28 +337,28 @@ LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return SpaceToBatchNdSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return SpaceToBatchNdPaddingTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<float, 4> SpaceToBatchNdSimpleNhwcFloat32Test(
@@ -421,82 +421,82 @@ LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNhwcUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return SpaceToBatchNdSimpleTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return SpaceToBatchNdPaddingTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNhwcUint16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
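
A naming quirk worth flagging: the Uint16 suffix on these wrappers is historical, since the payload is int16_t and the data type is the signed, symmetric QSymmS16 (exactly the kind of ambiguity this rename removes). Dequantization under the two schemes differs only in the offset term, as this standard-C++ sketch shows:

// Dequantization sketch for the two renamed schemes; standard C++ only.
#include <cstdint>

float DequantizeAsymmU8(uint8_t q, float scale, int32_t offset)
{
    return scale * static_cast<float>(static_cast<int32_t>(q) - offset);
}

float DequantizeSymmS16(int16_t q, float scale)
{
    return scale * static_cast<float>(q);  // zero point is implicitly 0
}
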
index b6bf530da3fe14c8674256bf453e0b3767973f27..59e1481ad1f7df616b2c0d3bcad18389394f9134 100644 (file)
@@ -159,7 +159,7 @@ LayerTestResult<uint8_t, 4> SpaceToDepthNhwcAsymmQ8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
+    return SpaceToDepthSimpleTest1<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager);
 }
@@ -168,7 +168,7 @@ LayerTestResult<uint8_t, 4> SpaceToDepthNchwAsymmQ8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
+    return SpaceToDepthSimpleTest1<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         armnn::DataLayout::NCHW);
@@ -235,7 +235,7 @@ LayerTestResult<int16_t, 4> SpaceToDepthNhwcQSymm16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
+    return SpaceToDepthSimpleTest2<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager);
 }
@@ -244,7 +244,7 @@ LayerTestResult<int16_t, 4> SpaceToDepthNchwQSymm16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
+    return SpaceToDepthSimpleTest2<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         armnn::DataLayout::NCHW);
index c8c2f9c7d11cf082ec14f240697793ccedd65bcd..ef81a1dd1d0b4c2c49563778580e5fd0b6cbc13f 100644 (file)
@@ -341,14 +341,14 @@ std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+    return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
 std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+    return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
 LayerTestResult<float, 3> CopyViaSplitterFloat32Test(
@@ -369,12 +369,12 @@ LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+    return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
 LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+    return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
 }
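
Both splitter wrappers pass scale 1.0f and offset 0 explicitly. As a quick sanity check on the renamed enumerators, a hedged usage sketch assuming armnn/TypesUtils.hpp provides GetDataTypeName and that its strings follow the new spellings:

// Usage sketch; assumes GetDataTypeName exists in armnn/TypesUtils.hpp and
// that its returned strings track the renamed enumerators.
#include <armnn/TypesUtils.hpp>
#include <iostream>

int main()
{
    std::cout << armnn::GetDataTypeName(armnn::DataType::QAsymmU8) << "\n";  // expected: QAsymmU8
    std::cout << armnn::GetDataTypeName(armnn::DataType::QSymmS16) << "\n";  // expected: QSymmS16
    return 0;
}
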
index 23f5df0df952bdd514198de55386c424d0864e82..c6c330e875c900fc116bde3d2d1055588c65d9a4 100644 (file)
@@ -959,42 +959,42 @@ LayerTestResult<uint8_t, 4> StridedSlice4dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return StridedSlice4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> StridedSlice4dReverseUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice4dReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return StridedSlice4dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return StridedSliceSimpleStrideTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskBitPosition0Dim3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                                            memoryManager);
+    return StridedSliceShrinkAxisMaskBitPosition0Dim3Test<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
@@ -1002,7 +1002,7 @@ LayerTestResult<uint8_t, 3> StridedSliceShrinkAxisMaskBitPosition0Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskBitPosition0Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                                        memoryManager);
+    return StridedSliceShrinkAxisMaskBitPosition0Test<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
@@ -1010,7 +1010,7 @@ LayerTestResult<uint8_t, 3> StridedSliceShrinkAxisMaskBitPosition1Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskBitPosition1Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                                        memoryManager);
+    return StridedSliceShrinkAxisMaskBitPosition1Test<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
@@ -1018,7 +1018,7 @@ LayerTestResult<uint8_t, 3> StridedSliceShrinkAxisMaskBitPosition2Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskBitPosition2Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                                        memoryManager);
+    return StridedSliceShrinkAxisMaskBitPosition2Test<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
@@ -1026,7 +1026,7 @@ LayerTestResult<uint8_t, 3> StridedSliceShrinkAxisMaskBitPosition3Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskBitPosition3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                                        memoryManager);
+    return StridedSliceShrinkAxisMaskBitPosition3Test<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
@@ -1034,7 +1034,7 @@ LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskBitPosition0And1Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskBitPosition0And1Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                                            memoryManager);
+    return StridedSliceShrinkAxisMaskBitPosition0And1Test<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
@@ -1042,7 +1042,7 @@ LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskBitPosition0And2Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskBitPosition0And2Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                                            memoryManager);
+    return StridedSliceShrinkAxisMaskBitPosition0And2Test<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
@@ -1050,7 +1050,7 @@ LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskBitPosition0And3Uint8Test(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskBitPosition0And3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+    return StridedSliceShrinkAxisMaskBitPosition0And3Test<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                                             memoryManager);
 }
 
@@ -1058,7 +1058,7 @@ LayerTestResult<uint8_t, 1> StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8T
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskBitPosition0And1And3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+    return StridedSliceShrinkAxisMaskBitPosition0And1And3Test<armnn::DataType::QAsymmU8>(workloadFactory,
                                                                                                 memoryManager);
 }
 
@@ -1066,89 +1066,89 @@ LayerTestResult<uint8_t, 3> StridedSlice3dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return StridedSlice3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 3> StridedSlice3dReverseUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice3dReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return StridedSlice3dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 2> StridedSlice2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return StridedSlice2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<uint8_t, 2> StridedSlice2dReverseUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice2dReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+    return StridedSlice2dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> StridedSlice4dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return StridedSlice4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> StridedSlice4dReverseInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice4dReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return StridedSlice4dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return StridedSliceSimpleStrideTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 3> StridedSlice3dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return StridedSlice3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 3> StridedSlice3dReverseInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice3dReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return StridedSlice3dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 2> StridedSlice2dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return StridedSlice2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
 
 LayerTestResult<int16_t, 2> StridedSlice2dReverseInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return StridedSlice2dReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+    return StridedSlice2dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
 }
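
All of the StridedSlice hunks above are one-token substitutions: QuantisedAsymm8 becomes QAsymmU8 and QuantisedSymm16 becomes QSymmS16, with no change to enumerator values or behaviour. For out-of-tree code that still spells the old names, the usual way to keep it compiling during a rename like this is a pair of deprecated aliases in the enum itself. A minimal sketch of that pattern; the enumerator values shown and the use of the standard C++17 attribute (rather than whatever macro the real include/armnn/Types.hpp uses) are assumptions:

    // Sketch only: renamed enumerators plus deprecated aliases, so the old
    // spellings keep compiling but warn at every use site.
    enum class DataType
    {
        Float16  = 0,
        Float32  = 1,
        QAsymmU8 = 2,   // unsigned asymmetric 8-bit (was QuantisedAsymm8)
        Signed32 = 3,
        Boolean  = 4,
        QSymmS16 = 5,   // signed symmetric 16-bit (was QuantisedSymm16)

        QuantisedAsymm8 [[deprecated("Use DataType::QAsymmU8 instead.")]] = QAsymmU8,
        QuantisedSymm16 [[deprecated("Use DataType::QSymmS16 instead.")]] = QSymmS16
    };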
index d18002163974ed87d61e7ed22d7ab5cd557a3c2f..525fb46d56fcd324d0f14f60a686904911a23134 100644 (file)
@@ -27,7 +27,7 @@ LayerTestResult<uint8_t, 4> SubtractionUint8Test(
     std::vector<uint8_t> input1 = {  1,  2,  1,  2 };
     std::vector<uint8_t> output = {  3,  3,  5,  5 };
 
-    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -57,7 +57,7 @@ LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
 
     std::vector<uint8_t> output = { 5, 6, 7, 8 };
 
-    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -87,7 +87,7 @@ LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
 
     std::vector<uint8_t> output = { 8, 11, 12, 15 };
 
-    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -254,7 +254,7 @@ LayerTestResult<int16_t, 4> SubtractionInt16Test(
     std::vector<int16_t> input1 = {  1,  2,  1,  2 };
     std::vector<int16_t> output = {  3,  3,  5,  5 };
 
-    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape,
@@ -284,7 +284,7 @@ LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
 
     std::vector<int16_t> output = { 3, 4, 5, 6 };
 
-    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
@@ -314,7 +314,7 @@ LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
 
     std::vector<int16_t> output = { 8, 11, 12, 15 };
 
-    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
         shape0,
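
The subtraction hunks make the point of the rename concrete: the same test file holds uint8_t payload vectors for QAsymmU8 and int16_t payload vectors for QSymmS16, and the old names said nothing about that signedness. As a reminder of the two schemes, a self-contained sketch of the textbook arithmetic (this is not ArmNN's kernel code): QAsymmU8 stores unsigned values with a scale and a zero point, while QSymmS16 stores signed values with a scale only and a zero point fixed at 0.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // QAsymmU8: real = scale * (q - zeroPoint), with q clamped to [0, 255].
    uint8_t QuantizeQAsymmU8(float value, float scale, int32_t zeroPoint)
    {
        int32_t q = static_cast<int32_t>(std::round(value / scale)) + zeroPoint;
        return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
    }

    // QSymmS16: real = scale * q, q in [-32768, 32767], zero point always 0.
    int16_t QuantizeQSymmS16(float value, float scale)
    {
        int32_t q = static_cast<int32_t>(std::round(value / scale));
        return static_cast<int16_t>(std::min<int32_t>(32767, std::max<int32_t>(-32768, q)));
    }

    float DequantizeQAsymmU8(uint8_t q, float scale, int32_t zeroPoint)
    {
        return scale * static_cast<float>(static_cast<int32_t>(q) - zeroPoint);
    }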
index 4b4894f4d29233575cc8a72abe3042fbeafba6e2..3ac25f0534017094fa62ea5be0710aa59468cdcc 100644 (file)
@@ -566,7 +566,7 @@ LayerTestResult<uint8_t, 4> TransposeConvolution2dPerAxisQuantTest(
 {
    using namespace armnn;
 
-    const DataType inputType  = DataType::QuantisedAsymm8;
+    const DataType inputType  = DataType::QAsymmU8;
     const DataType kernelType = DataType::QuantizedSymm8PerAxis;
     const DataType biasType   = DataType::Signed32;
 
@@ -672,15 +672,15 @@ SimpleTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Floa
     bool biasEnabled,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
@@ -693,15 +693,15 @@ PaddedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Floa
     bool biasEnabled,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-PaddedTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-PaddedTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
@@ -714,15 +714,15 @@ StridedTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType::Flo
     bool biasEnabled,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-StridedTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-StridedTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool biasEnabled,
@@ -734,14 +734,14 @@ MultiChannelTransposeConvolution2dTest<armnn::DataType::Float32, armnn::DataType
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-MultiChannelTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-MultiChannelTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout layout);
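
The transpose convolution blocks above change template arguments rather than code, because these test functions are defined in a .cpp and exposed through explicit instantiation: each supported (input type, bias type) pairing must be spelled out once, which is why the rename touches every instantiation line. A compilable reduction of the mechanism with hypothetical stand-in names; ResolveType is the ArmNN trait that maps an enumerator to its payload type, shrunk here to two specializations:

    #include <cstdint>
    #include <vector>

    enum class DataType { QAsymmU8, QSymmS16, Signed32 };              // stand-in
    template<typename T, int N> struct LayerTestResult { std::vector<T> output; };

    template<DataType DT> struct ResolveTypeImpl;                      // enum -> payload type
    template<> struct ResolveTypeImpl<DataType::QAsymmU8> { using Type = uint8_t; };
    template<> struct ResolveTypeImpl<DataType::QSymmS16> { using Type = int16_t; };
    template<DataType DT> using ResolveType = typename ResolveTypeImpl<DT>::Type;

    // Defined in the .cpp, so the linker only sees the combinations
    // instantiated explicitly below.
    template<DataType ArmnnType, DataType BiasType>
    LayerTestResult<ResolveType<ArmnnType>, 4> SimpleTransposeConvTest()
    {
        return {};
    }

    // One explicit instantiation per supported pairing: these are the
    // lines the rename has to edit.
    template LayerTestResult<uint8_t, 4> SimpleTransposeConvTest<DataType::QAsymmU8, DataType::Signed32>();
    template LayerTestResult<int16_t, 4> SimpleTransposeConvTest<DataType::QSymmS16, DataType::Signed32>();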
index bce91ab4626e577c7b447f9845a4917ab4e2d845..f7129d60358adc92610a75f4496cf0e85a6f0726 100644 (file)
@@ -195,7 +195,7 @@ bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                           const ArgMinMaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    if (input.GetDataType() == DataType::QuantisedAsymm8)
+    if (input.GetDataType() == DataType::QAsymmU8)
     {
         return false;
     }
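
The ClLayerSupport hunk is the only one in this stretch that sits inside runtime logic rather than tests, and it is still a pure rename: ArgMinMax on the CL backend keeps rejecting QAsymmU8 inputs. A self-contained sketch of the guard pattern, using a plain string pointer in place of ArmNN's Optional<std::string&> and an illustrative message:

    #include <string>

    enum class DataType { QAsymmU8, QSymmS16, Float32 };               // stand-in
    struct TensorInfo
    {
        DataType m_Type;
        DataType GetDataType() const { return m_Type; }
    };

    // Reject an unsupported data type up front and say why, then fall
    // through to the remaining checks (elided here).
    bool IsArgMinMaxSupportedSketch(const TensorInfo& input, std::string* reasonIfUnsupported)
    {
        if (input.GetDataType() == DataType::QAsymmU8)
        {
            if (reasonIfUnsupported != nullptr)
            {
                *reasonIfUnsupported = "ArgMinMax: QAsymmU8 input is not supported";
            }
            return false;
        }
        return true;
    }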
index 3c80ea9d49d66a65b3e5eabd31df4e8021b971ae..d79745c42071bb2196e16cf77cf2bb96bc614ce7 100644 (file)
@@ -127,7 +127,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
                                     MultiplicationLayer,
-                                    armnn::DataType::QuantisedAsymm8>();
+                                    armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
@@ -365,7 +365,7 @@ BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
 {
-    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QuantisedAsymm8>();
+    ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QAsymmU8>();
 }
 
 template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
@@ -522,7 +522,7 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
 
 BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
 {
-    ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QuantisedAsymm8);
+    ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
 }
 
 template <typename armnn::DataType DataType>
@@ -555,7 +555,7 @@ BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
 {
-    ClCreateReshapeWorkloadTest<armnn::DataType::QuantisedAsymm8>();
+    ClCreateReshapeWorkloadTest<armnn::DataType::QAsymmU8>();
 }
 
 template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
@@ -834,7 +834,7 @@ BOOST_AUTO_TEST_CASE(CreateResizeFloat16NchwWorkload)
 
 BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
 {
-    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
@@ -849,7 +849,7 @@ BOOST_AUTO_TEST_CASE(CreateResizeFloat16NhwcWorkload)
 
 BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
 {
-    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+    ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
 
 template <typename MeanWorkloadType, typename armnn::DataType DataType>
@@ -883,7 +883,7 @@ BOOST_AUTO_TEST_CASE(CreateMeanFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateMeanUint8Workload)
 {
-    ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QuantisedAsymm8>();
+    ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QAsymmU8>();
 }
 
 template <typename ConcatWorkloadType, armnn::DataType DataType>
@@ -923,17 +923,17 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
 {
-    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
 }
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
 {
-    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
 }
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
 {
-    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+    ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
 }
 
 template <typename SpaceToDepthWorkloadType, typename armnn::DataType DataType>
@@ -965,12 +965,12 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
 {
-    ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+    ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
 {
-    ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+    ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
 }
 
 template <armnn::DataType DataType>
@@ -1013,7 +1013,7 @@ BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
 {
-    ClCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+    ClCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
 template <typename QuantizedLstmWorkloadType>
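
Several of the Boost test names above, CreateSpaceToDepthQAsymm8Workload for instance, already used the new spelling before this change, so only the enumerator inside the body moves. Anywhere the enumerator is turned into a string for logs or error messages has to move with it; TypesUtils.hpp, in this commit's file list, is where that mapping lives. A sketch of the likely shape of that function; the literal strings here are assumptions, not the header's actual output:

    // Sketch of an enumerator-to-name mapping; check armnn/TypesUtils.hpp
    // for the real GetDataTypeName and its exact strings.
    enum class DataType { Float32, QAsymmU8, QSymmS16 };               // stand-in

    constexpr const char* GetDataTypeNameSketch(DataType dataType)
    {
        switch (dataType)
        {
            case DataType::Float32:  return "Float32";
            case DataType::QAsymmU8: return "QAsymmU8";
            case DataType::QSymmS16: return "QSymmS16";
            default:                 return "Unknown";
        }
    }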
index 5d7a2f5666499f3e3b7916a5719cce273c4ee8ce..260f8f68cd4691c6bb84773ceacab1e69efa78a9 100644 (file)
@@ -43,7 +43,7 @@ BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Test)
 
 BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Uint8Test)
 {
-    ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test)
@@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test)
 
 BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Uint8Test)
 {
-    ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test)
@@ -63,7 +63,7 @@ BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test)
 
 BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Uint8Test)
 {
-    ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 // DepthToSpace
@@ -79,12 +79,12 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
@@ -99,23 +99,23 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Dequantize
 BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
 {
-    DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
 {
-    DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest)
@@ -133,7 +133,7 @@ BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                                ComparisonOperation::Greater,
                                                                expectedOutput);
 }
@@ -153,7 +153,7 @@ BOOST_AUTO_TEST_CASE(ClGreaterBroadcastEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                                   ComparisonOperation::Greater,
                                                                   expectedOutput);
 }
@@ -186,7 +186,7 @@ BOOST_AUTO_TEST_CASE(ClPreluEndToEndFloat32Test)
 
 BOOST_AUTO_TEST_CASE(ClPreluEndToEndTestUint8)
 {
-    PreluEndToEndPositiveTest<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSpaceToDepthNhwcEndToEndTest1)
@@ -216,7 +216,7 @@ BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndTest)
 
 BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndUint8Test)
 {
-    Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndTest)
@@ -231,12 +231,12 @@ BOOST_AUTO_TEST_CASE(ClSplitter2dDim1EndToEndTest)
 
 BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndUint8Test)
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSplitter2dDim1EndToEndUint8Test)
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndTest)
@@ -256,17 +256,17 @@ BOOST_AUTO_TEST_CASE(ClSplitter3dDim2EndToEndTest)
 
 BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndUint8Test)
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSplitter3dDim1EndToEndUint8Test)
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSplitter3dDim2EndToEndUint8Test)
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndTest)
@@ -291,22 +291,22 @@ BOOST_AUTO_TEST_CASE(ClSplitter4dDim3EndToEndTest)
 
 BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndUint8Test)
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSplitter4dDim1EndToEndUint8Test)
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSplitter4dDim2EndToEndUint8Test)
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(ClSplitter4dDim3EndToEndUint8Test)
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 // TransposeConvolution2d
@@ -318,7 +318,7 @@ BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndFloatNchwTest)
 
 BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NchwTest)
 {
-    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
@@ -330,7 +330,7 @@ BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndFloatNhwcTest)
 
 BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NhwcTest)
 {
-    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
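
The end-to-end cases above feed uint8_t host buffers into networks whose tensors are declared QAsymmU8, and that pairing is enforced by a small trait (CompatibleTypes.hpp appears in this commit's file list). A reduced sketch of its assumed shape:

    #include <cstdint>

    enum class DataType { Boolean, QAsymmU8, QSymmS16, Float32 };      // stand-in

    // Default: a host buffer type is incompatible with a tensor data type...
    template<typename T>
    bool CompatibleTypes(DataType) { return false; }

    // ...unless a specialization says otherwise.
    template<>
    bool CompatibleTypes<uint8_t>(DataType dataType)
    {
        return dataType == DataType::Boolean || dataType == DataType::QAsymmU8;
    }

    template<>
    bool CompatibleTypes<int16_t>(DataType dataType)
    {
        return dataType == DataType::QSymmS16;
    }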
 
index bcf057b1fb791a1f5fb347cbeefdca567879a123..35e11721613934ad099a5d90bf983b3bbfc6c193 100644 (file)
@@ -40,7 +40,7 @@ BOOST_FIXTURE_TEST_CASE(IsLayerSupportedUint8Cl, ClContextControlFixture)
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
-    IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+    IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
 }
 
 BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedCl, ClContextControlFixture)
index c821dd94c306f3c20cd0a0d23f9335b459da71b4..76e5174cb7b17e6e5f3fb0f47fec1f0df976f9a2 100644 (file)
@@ -55,20 +55,20 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwTest1<DataTyp
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat322, BatchToSpaceNdNchwTest2<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
 
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
 
 // Fully Connected
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true)
 
 ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
@@ -104,11 +104,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
@@ -120,11 +120,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
@@ -136,11 +136,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 
@@ -199,7 +199,7 @@ ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
 ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
 ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
 ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
-                     ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>, false)
+                     ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
 
 // Normalization
 ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
@@ -388,15 +388,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
 
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
@@ -408,22 +408,22 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
 
 // Floor
 ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
 
 // Reshape
 ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>)
 
 // Pad
@@ -437,24 +437,24 @@ ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
 ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
 ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
 
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
 
 // PReLU
 ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(PreluUint8,   PreluTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(PreluUint8,   PreluTest<DataType::QAsymmU8>)
 
 // Permute
 ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
 
 // Lstm
 ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
@@ -493,13 +493,13 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
 
 // Minimum
 ARMNN_AUTO_TEST_CASE(MinimumBroadcast1Element1, MinimumBroadcast1ElementTest1)
@@ -639,25 +639,25 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
                      SimpleResizeBilinearTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
-                     SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeBilinearTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
                      ResizeBilinearNopTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
-                     ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearNopTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
                      ResizeBilinearSqMinTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
-                     ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
                      ResizeBilinearMinTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
-                     ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 
 // Resize Bilinear - NHWC
@@ -665,25 +665,25 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
                      ResizeBilinearNopTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
-                     ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearNopTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
                      SimpleResizeBilinearTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
-                     SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeBilinearTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
                      ResizeBilinearSqMinTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
-                     ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
                      ResizeBilinearMinTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
-                     ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 
 // Resize NearestNeighbor - NCHW
@@ -691,31 +691,31 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
                      SimpleResizeNearestNeighborTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
                      ResizeNearestNeighborNopTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
                      ResizeNearestNeighborSqMinTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
-                     ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
                      ResizeNearestNeighborMinTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
-                     ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
                      ResizeNearestNeighborMagTest<DataType::Float32>,
                      DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
-                     ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
                      DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
 
 // Resize NearestNeighbor - NHWC
@@ -723,31 +723,31 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
                      ResizeNearestNeighborNopTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
                      SimpleResizeNearestNeighborTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
                      ResizeNearestNeighborSqMinTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
-                     ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
                      ResizeNearestNeighborMinTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
-                     ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
                      ResizeNearestNeighborMagTest<DataType::Float32>,
                      DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
-                     ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
                      DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
 
 // Rsqrt
@@ -777,11 +777,11 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nhwc,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -794,11 +794,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -811,11 +811,11 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nhwc,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -828,11 +828,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -845,11 +845,11 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nhwc,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -862,11 +862,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -877,10 +877,10 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc,
                      MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
-                     MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nhwc,
-                     MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      DataLayout::NHWC)
 
 // Abs
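
The long run of ARMNN_AUTO_TEST_CASE lines above registers one test per (test function, template argument) pair, which is why a type rename produces hundreds of one-token edits in this file alone. A self-contained toy of the registration idea; the real macro in the backend test infrastructure also wires up the workload factory and result comparison, so treat this expansion as an assumption:

    #include <iostream>

    enum class DataType { QAsymmU8, QSymmS16, Float32 };               // stand-in

    template<DataType DT>
    bool SimpleReshapeTest() { return true; }                          // stand-in body

    // Hypothetical expansion sketch: one named test function per line,
    // whose body just invokes the templated test function.
    #define AUTO_TEST_CASE(TestName, TestFunction)                        \
        void TestName() { std::cout << #TestName << ": "                  \
                                    << (TestFunction() ? "pass" : "FAIL") \
                                    << '\n'; }

    AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<DataType::QAsymmU8>)

    int main() { SimpleReshapeUint8(); }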
index 435ead853f8adb4720a0f7b7317434a314e7b8f5..b086122bdc02d807d2b41ccacb747332b036bef1 100644 (file)
@@ -60,6 +60,6 @@ void ClGreaterWorkload<T>::Execute() const
 }
 
 template class ClGreaterWorkload<DataType::Float32>;
-template class ClGreaterWorkload<DataType::QuantisedAsymm8>;
+template class ClGreaterWorkload<DataType::QAsymmU8>;
 
 } //namespace armnn
index 69d017e6c6620e4695d182819ea2cf205add6f34..84a24fffa3d415b858007470ef2da08e3c209178 100644 (file)
@@ -29,6 +29,6 @@ private:
 };
 
 using ClGreaterFloat32Workload = ClGreaterWorkload<DataType::Float32>;
-using ClGreaterUint8Workload = ClGreaterWorkload<DataType::QuantisedAsymm8>;
+using ClGreaterUint8Workload = ClGreaterWorkload<DataType::QAsymmU8>;
 
 } //namespace armnn
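
The two ClGreater hunks are the same rename seen from both sides of a common ArmNN pattern: the workload is a class template over DataType, explicitly instantiated in the .cpp for each supported type and aliased in the header so call sites keep short names. A compilable reduction of the pattern:

    enum class DataType { Float32, QAsymmU8 };                         // stand-in

    template<DataType DT>
    class ClGreaterWorkloadSketch
    {
    public:
        void Execute() const {}   // real implementation elided
    };

    // In the .cpp: compile the template once per supported data type.
    template class ClGreaterWorkloadSketch<DataType::Float32>;
    template class ClGreaterWorkloadSketch<DataType::QAsymmU8>;

    // In the .hpp: short names for call sites. The alias target is what
    // the rename updates from ClGreaterWorkload<DataType::QuantisedAsymm8>.
    using ClGreaterFloat32WorkloadSketch = ClGreaterWorkloadSketch<DataType::Float32>;
    using ClGreaterUint8WorkloadSketch   = ClGreaterWorkloadSketch<DataType::QAsymmU8>;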
index ecaf2799e8c4f43a8948087ae1e391abf376daca..c5cfcd8fc1aa32f1763f2f299a5dec572d857d8b 100644 (file)
@@ -98,7 +98,7 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
         case DataType::Float32:
             CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<float>());
             break;
-        case DataType::QuantisedAsymm8:
+        case DataType::QAsymmU8:
             CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>());
             break;
         case DataType::QuantizedSymm8PerAxis:
index 6a4f612881b6bbcec163a1e58d5b49b2393b3788..a08c8f7d2a8e56137fae96b0187a09698cdaa55a 100644 (file)
@@ -152,7 +152,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
                                       SubtractionLayer,
-                                      DataType::QuantisedAsymm8>();
+                                      DataType::QAsymmU8>();
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -178,7 +178,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
                                       MultiplicationLayer,
-                                      DataType::QuantisedAsymm8>();
+                                      DataType::QAsymmU8>();
 }
 
 template <typename WorkloadType,
@@ -445,12 +445,12 @@ BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
 
 BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload)
 {
-    NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NCHW);
+    NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
 {
-    NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NHWC);
+    NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NHWC);
 }
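
These create-workload helpers are function templates over a DataType enumerator. A sketch of their shape, assuming armnn::ResolveType (from the internal ResolveType.hpp) maps QAsymmU8 to uint8_t as its uses here suggest:

#include <type_traits>

template <armnn::DataType ArmnnType,
          typename T = armnn::ResolveType<ArmnnType>>
void ExampleCreateWorkloadTest(armnn::DataLayout layout)
{
    static_assert(ArmnnType != armnn::DataType::QAsymmU8
                      || std::is_same<T, uint8_t>::value,
                  "QAsymmU8 payloads are unsigned 8-bit");
    (void)layout; // a real test builds TensorInfo(shape, ArmnnType) tensors,
                  // creates the workload via the factory under test, and
                  // checks its queue descriptor; T types the host buffers.
}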
 
 static void NeonCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
@@ -493,7 +493,7 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
 
 BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
 {
-    NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QuantisedAsymm8);
+    NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
 }
 
 template <typename armnn::DataType DataType>
@@ -527,7 +527,7 @@ BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
 
 BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
 {
-    NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
+    NeonCreateReshapeWorkloadTest<DataType::QAsymmU8>();
 }
 
 template <typename ResizeWorkloadType, armnn::DataType DataType>
@@ -563,7 +563,7 @@ BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
 
 BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
 {
-    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
@@ -573,7 +573,7 @@ BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
 
 BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
 {
-    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+    NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
 
 template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
@@ -634,12 +634,12 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
 {
-    NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+    NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
 {
-    NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+    NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
@@ -859,17 +859,17 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
 {
-    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
 }
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
 {
-    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
 }
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
 {
-    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+    NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
 }
 
 template <armnn::DataType DataType>
@@ -914,7 +914,7 @@ BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
 {
-    NeonCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+    NeonCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
 template <typename QuantizedLstmWorkloadType>
index 2f4c8479714841477f00d8f04026110b3fb15578..e1c929b17b0e44278b141a00f41400c77998bd7a 100644
@@ -92,7 +92,7 @@ BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                                ComparisonOperation::Greater,
                                                                expectedOutput);
 }
@@ -112,7 +112,7 @@ BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                                   ComparisonOperation::Greater,
                                                                   expectedOutput);
 }
@@ -124,7 +124,7 @@ BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
 
 BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
 {
-    ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
@@ -134,7 +134,7 @@ BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
 
 BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
 {
-    ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
@@ -144,7 +144,7 @@ BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
 
 BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
 {
-    ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 // DepthToSpace
@@ -160,12 +160,12 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
@@ -180,23 +180,23 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Dequantize
 BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
 {
-    DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
 {
-    DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
 }
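
Element-wise, these Dequantize tests exercise the mapping real = scale * (q - offset) for QAsymmU8 payloads. Illustrative helper (the test suite has its own utilities):

#include <cstdint>

float DequantizeQAsymmU8(uint8_t q, float scale, int32_t offset)
{
    return scale * static_cast<float>(static_cast<int32_t>(q) - offset);
}
// e.g. with scale = 0.5f and offset = 1, q = 3 dequantizes to 1.0f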
 
 BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
@@ -206,7 +206,7 @@ BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
 
 BOOST_AUTO_TEST_CASE(NeonPreluEndToEndTestUint8Test)
 {
-    PreluEndToEndPositiveTest<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest1)
@@ -236,7 +236,7 @@ BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndTest)
 
 BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndUint8Test)
 {
-    Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndTest)
@@ -251,12 +251,12 @@ BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndTest)
 
 BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndUint8Test)
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndUint8Test)
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndTest)
@@ -276,17 +276,17 @@ BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndTest)
 
 BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndUint8Test)
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndUint8Test)
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndUint8Test)
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndTest)
@@ -311,22 +311,22 @@ BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndTest)
 
 BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndUint8Test)
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndUint8Test)
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndUint8Test)
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndUint8Test)
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonQuantizedLstmEndToEndTest)
@@ -342,7 +342,7 @@ BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNchwTest)
 
 BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NchwTest)
 {
-    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
@@ -354,7 +354,7 @@ BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNhwcTest)
 
 BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NhwcTest)
 {
-    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
 
@@ -457,52 +457,52 @@ BOOST_AUTO_TEST_CASE(NeonArgMinAxis3Test)
 
 BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTestQuantisedAsymm8)
 {
-    ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTestQuantisedAsymm8)
 {
-    ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0TestQuantisedAsymm8)
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonArgMinAxis0TestQuantisedAsymm8)
 {
-    ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1TestQuantisedAsymm8)
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonArgMinAxis1TestQuantisedAsymm8)
 {
-    ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2TestQuantisedAsymm8)
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonArgMinAxis2TestQuantisedAsymm8)
 {
-    ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3TestQuantisedAsymm8)
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
 {
-    ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
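
Note that the rename touches only the enumerator: BOOST test-case names such as NeonArgMaxSimpleTestQuantisedAsymm8 keep the old "QuantisedAsymm8" spelling while instantiating the new type. Schematically:

BOOST_AUTO_TEST_CASE(ExampleArgMaxQuantisedAsymm8) // name keeps the old spelling
{
    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends); // type uses the new one
}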
 
 BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest)
@@ -586,7 +586,7 @@ BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test)
     QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
     QuantizeData(qScores.data(), scores.data(), scoresInfo);
     QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
-    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
                                                                              qScores, qAnchors,
                                                                              1.0f, 1, 0.01f, 0, 0.5f, 0);
 }
@@ -664,7 +664,7 @@ BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
     QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
     QuantizeData(qScores.data(), scores.data(), scoresInfo);
     QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
-    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
                                                                           qScores, qAnchors,
                                                                           1.0f, 1, 0.01f, 0, 0.5f, 0);
 }
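
The QuantizeData calls above turn float test vectors into QAsymmU8 payloads before the network runs. A sketch of the arithmetic, assuming the usual asymmetric scheme (the real helper reads scale and offset from a TensorInfo):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

void QuantizeToQAsymmU8(uint8_t* dst, const float* src, std::size_t count,
                        float scale, int32_t offset)
{
    for (std::size_t i = 0; i < count; ++i)
    {
        const int32_t q = static_cast<int32_t>(std::lround(src[i] / scale)) + offset;
        dst[i] = static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
    }
}
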
index 435afd23a6267edef572e312054a75c1c6f17613..57e69ec5912dee38208f06eea5f40ac897342b47 100644
@@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Neon)
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
-    IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+    IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
 }
 
 BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedNeon)
index d645168456f978afbf579aff76309ea13997a2a4..cde14350457ca3529243bb80f62dd281d2a04fd0 100644
@@ -58,11 +58,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
@@ -74,11 +74,11 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
@@ -93,12 +93,12 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
                      Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
-                             <DataType::QuantisedAsymm8, DataType::Signed32>,
+                             <DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
                      Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
-                             <DataType::QuantisedAsymm8, DataType::Signed32>,
+                             <DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4,
@@ -124,15 +124,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
 
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
@@ -144,15 +144,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
 
 // Depthwise Convolution
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
@@ -445,7 +445,7 @@ ARMNN_AUTO_TEST_CASE(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
 ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
 ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
 ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
-                     ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>, false)
+                     ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
 
 // Fully Connected
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
@@ -453,8 +453,8 @@ ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, tr
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
 ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true)
 
 // Add
 ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
@@ -566,7 +566,7 @@ ARMNN_AUTO_TEST_CASE(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint
 
 // Reshape
 ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QAsymmU8>)
 ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<armnn::DataType::Float32>)
 
 // Pad
@@ -580,20 +580,20 @@ ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
 ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
 ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
 
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
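
The trailing arguments to these Pad cases are presumably the quantization scale and offset handed to the test tensors (plus, for the CustomPadding case, the pad value). A QSymmS16 tensor built with those parameters would look like this (shape illustrative):

armnn::TensorInfo padInputInfo({ 2, 2 }, armnn::DataType::QSymmS16,
                               /*quantizationScale=*/2.0f,
                               /*quantizationOffset=*/0);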
 
 // Permute
 ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
 
 // Lstm
 ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
@@ -616,13 +616,13 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
 
 // Max
 ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
@@ -650,19 +650,19 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest<DataType::Float32>
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest<DataType::Float32>, DataLayout::NCHW)
 
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
-                     SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeBilinearTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
-                     ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearNopTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
-                     ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
-                     ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
-                     ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMagTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 
 // Resize Bilinear - NHWC data layout
@@ -683,19 +683,19 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
                      DataLayout::NHWC)
 
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
-                     ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearNopTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
-                     SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeBilinearTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
-                     ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
-                     ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
-                     ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMagTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 
 // Resize NearestNeighbor - NCHW
@@ -716,19 +716,19 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
                      DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
 
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
-                     ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
-                     ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
-                     ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
                      DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
 
 // Resize NearestNeighbor - NHWC
@@ -749,19 +749,19 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
                      DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
 
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
-                     ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
-                     ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
-                     ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
                      DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
 
 // Slice
@@ -834,7 +834,7 @@ ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
 
 // PReLU
 ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(PreluUint8,   PreluTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(PreluUint8,   PreluTest<DataType::QAsymmU8>)
 
 // Stack
 ARMNN_AUTO_TEST_CASE(Stack0Axis,           StackAxis0Float32Test)
@@ -854,11 +854,11 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nhwc,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -871,11 +871,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -888,11 +888,11 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nhwc,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -905,11 +905,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -922,11 +922,11 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nhwc,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -939,11 +939,11 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 
@@ -954,10 +954,10 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc,
                      MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
-                     MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nhwc,
-                     MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      DataLayout::NHWC)
 
 // Abs
@@ -980,12 +980,12 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QAsymmU8>)
 
 #if defined(ARMNNREF_ENABLED)
 
index 56e5552dd350a178dcf3977d1f9a90f65557b440..338c7eb1f6a97a6eef82fdb438c1025c67744372 100644
@@ -72,7 +72,7 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
     m_FullyConnectedLayer.reset(layer.release());
 
     // Allocate
-    if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QuantisedAsymm8)
+    if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QAsymmU8)
     {
         InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
     }
index 62396261e1cc4573c2badc74333ca639d6e2f224..6380dfada53a6ca92096b1e0da0b77dd23625f4d 100644
@@ -44,6 +44,6 @@ void NeonGreaterWorkload<T>::Execute() const
 }
 
 template class NeonGreaterWorkload<DataType::Float32>;
-template class NeonGreaterWorkload<DataType::QuantisedAsymm8>;
+template class NeonGreaterWorkload<DataType::QAsymmU8>;
 
 } //namespace armnn
\ No newline at end of file
index df1e07e07bbed366b2f5c53638f38a9add23337c..bcab27e7a62b6fc2ce11e375f94c9f87d9a1d0d3 100644
@@ -31,6 +31,6 @@ private:
 };
 
 using NeonGreaterFloat32Workload = NeonGreaterWorkload<DataType::Float32>;
-using NeonGreaterUint8Workload = NeonGreaterWorkload<DataType::QuantisedAsymm8>;
+using NeonGreaterUint8Workload = NeonGreaterWorkload<DataType::QAsymmU8>;
 
 } //namespace armnn
\ No newline at end of file
index e9edc8901ee9eceef5abe926378c47bc568e0713..f98fe4403987df69c7d270353e319f83df2d00e9 100644
@@ -43,7 +43,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
         case DataType::Float32:
             CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
             break;
-        case DataType::QuantisedAsymm8:
+        case DataType::QAsymmU8:
             CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
             break;
         case DataType::QuantizedSymm8PerAxis:
index ebcd1f633e44bb08c87cab2ec1d6756959eea5d9..4767aa0b3b3b2d9095782f0be8d2c619cc304185 100644
@@ -75,8 +75,8 @@ bool RefLayerSupport::IsAbsSupported(const TensorInfo& input, const TensorInfo&
         {
             DataType::Float32,
             DataType::Float16,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
+            DataType::QAsymmU8,
+            DataType::QSymmS16
         };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -105,8 +105,8 @@ bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
     std::array<DataType,4> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -168,8 +168,8 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
     std::array<DataType,4> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -202,8 +202,8 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const
     std::array<DataType, 4> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16,
         DataType::Signed32
     };
 
@@ -232,8 +232,8 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     bool supported = true;
@@ -280,8 +280,8 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
     {
             DataType::Float32,
             DataType::Float16,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
+            DataType::QAsymmU8,
+            DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -322,8 +322,8 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     bool supported = true;
@@ -351,8 +351,8 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
     {
             DataType::Float32,
             DataType::Float16,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
+            DataType::QAsymmU8,
+            DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
@@ -377,8 +377,8 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
     {
         DataType::Float32,
         DataType::Signed32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
@@ -439,8 +439,8 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -453,11 +453,11 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                   "Reference convolution2d: input and output types mismatched.");
 
     const DataType inputType = input.GetDataType();
-    if (inputType == DataType::QuantisedAsymm8)
+    if (inputType == DataType::QAsymmU8)
     {
         std::array<DataType, 2> supportedWeightTypes =
         {
-            DataType::QuantisedAsymm8,
+            DataType::QAsymmU8,
             DataType::QuantizedSymm8PerAxis
         };
 
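Condensed form of the weight check above (helper name hypothetical): with QAsymmU8 inputs, reference convolution accepts per-tensor QAsymmU8 weights or per-axis QuantizedSymm8PerAxis weights.

#include <armnn/Tensor.hpp>

bool IsSupportedConvWeightType(const armnn::TensorInfo& weights)
{
    const armnn::DataType type = weights.GetDataType();
    return type == armnn::DataType::QAsymmU8
        || type == armnn::DataType::QuantizedSymm8PerAxis;
}
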
@@ -500,8 +500,8 @@ bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -528,8 +528,8 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -558,8 +558,8 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -572,11 +572,11 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                   "Reference DepthwiseConvolution2d: input and output types mismatched.");
 
     const DataType inputType = input.GetDataType();
-    if (inputType == DataType::QuantisedAsymm8)
+    if (inputType == DataType::QAsymmU8)
     {
         std::array<DataType, 2> supportedWeightTypes =
         {
-            DataType::QuantisedAsymm8,
+            DataType::QAsymmU8,
             DataType::QuantizedSymm8PerAxis
         };
 
@@ -616,9 +616,9 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
    bool supported = true;
 
     std::array<DataType,3> supportedInputTypes = {
-        DataType::QuantisedAsymm8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
-        DataType::QuantisedSymm16
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
@@ -655,8 +655,8 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod
     std::array<DataType,3> supportedInputTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
@@ -688,8 +688,8 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
     std::array<DataType,4> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -754,7 +754,7 @@ bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedSymm16
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -780,8 +780,8 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
     {
             DataType::Float32,
             DataType::Float16,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
+            DataType::QAsymmU8,
+            DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -834,8 +834,8 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -913,8 +913,8 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     bool supported = true;
@@ -979,7 +979,7 @@ bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
 
     std::array<DataType,2> supportedTypes = {
         DataType::Float32,
-        DataType::QuantisedSymm16
+        DataType::QSymmS16
     };
 
     // check inputs and outputs
@@ -1081,8 +1081,8 @@ bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0,
     std::array<DataType,4> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -1119,8 +1119,8 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1185,8 +1185,8 @@ bool RefLayerSupport::IsMemCopySupported(const TensorInfo &input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16,
+        DataType::QAsymmU8,
+        DataType::QSymmS16,
         DataType::Boolean
     };
 
@@ -1212,8 +1212,8 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
     std::array<DataType,4> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -1247,8 +1247,8 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
     std::array<DataType,4> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -1284,8 +1284,8 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
     {
         DataType::Float16,
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     bool supported = true;
@@ -1322,8 +1322,8 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1350,8 +1350,8 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
     std::array<DataType,3> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1379,8 +1379,8 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1411,9 +1411,9 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
 
     // Define supported output types.
     std::array<DataType,3> supportedOutputTypes = {
-        DataType::QuantisedAsymm8,
+        DataType::QAsymmU8,
         DataType::QSymmS8,
-        DataType::QuantisedSymm16
+        DataType::QSymmS16
     };
     supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
                                   "Reference quantize: output type not supported.");
@@ -1435,8 +1435,8 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
         DataType::Float32,
         DataType::Float16,
         DataType::Signed32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
     return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
         "Reference reshape: input type not supported.");
@@ -1451,8 +1451,8 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1478,8 +1478,8 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1503,8 +1503,8 @@ bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
     {
             DataType::Float32,
             DataType::Float16,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
+            DataType::QAsymmU8,
+            DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1533,8 +1533,8 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
     std::array<DataType, 3> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1560,8 +1560,8 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
     {
             DataType::Float32,
             DataType::Float16,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
+            DataType::QAsymmU8,
+            DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1587,8 +1587,8 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
     {
             DataType::Float32,
             DataType::Float16,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
+            DataType::QAsymmU8,
+            DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1616,8 +1616,8 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1642,8 +1642,8 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1663,8 +1663,8 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1693,8 +1693,8 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
@@ -1723,8 +1723,8 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
     std::array<DataType,3> supportedTypes =
     {
         DataType::Float32,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1749,8 +1749,8 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
     std::array<DataType,4> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
@@ -1785,8 +1785,8 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input,
     {
         DataType::Float32,
         DataType::Float16,
-        DataType::QuantisedAsymm8,
-        DataType::QuantisedSymm16
+        DataType::QAsymmU8,
+        DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1821,8 +1821,8 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
     {
             DataType::Float32,
             DataType::Float16,
-            DataType::QuantisedAsymm8,
-            DataType::QuantisedSymm16
+            DataType::QAsymmU8,
+            DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
@@ -1836,11 +1836,11 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
 
 
     const DataType inputType = input.GetDataType();
-    if (inputType == DataType::QuantisedAsymm8)
+    if (inputType == DataType::QAsymmU8)
     {
         std::array<DataType, 2> supportedWeightTypes =
         {
-            DataType::QuantisedAsymm8,
+            DataType::QAsymmU8,
             DataType::QuantizedSymm8PerAxis
         };
 
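Every RefLayerSupport.cpp hunk above has the same shape: a fixed array of supported DataTypes checked with TypeAnyOf through CheckSupportRule, so the rename is a pure substitution of enumerator names. A minimal sketch of that pattern with the new names follows; "ExampleLayer" and the function itself are hypothetical, while CheckSupportRule and TypeAnyOf are the helpers from src/backends/backendsCommon/LayerSupportRules.hpp:

    #include <array>
    #include <backendsCommon/LayerSupportRules.hpp>

    using namespace armnn;

    // Hypothetical layer check, mirroring the pattern in the hunks above.
    bool IsExampleLayerSupported(const TensorInfo& input,
                                 Optional<std::string&> reasonIfUnsupported)
    {
        std::array<DataType, 4> supportedTypes =
        {
            DataType::Float32,
            DataType::Float16,
            DataType::QAsymmU8,  // was QuantisedAsymm8
            DataType::QSymmS16   // was QuantisedSymm16
        };

        bool supported = true;
        supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                      "Example layer: input type not supported.");
        return supported;
    }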
index dffb13db2d3ff24f126a3697ce4bad1341782ff8..b3a0c859fb0a39c423ba24bf88184c2178c23e13 100644
@@ -51,7 +51,7 @@ bool IsFloat16(const WorkloadInfo& info)
 
 bool IsQSymm16(const WorkloadInfo& info)
 {
-    return IsDataType<DataType::QuantisedSymm16>(info);
+    return IsDataType<DataType::QSymmS16>(info);
 }
 
 RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
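IsQSymm16 above delegates to the file-local IsDataType<> template, so per-payload predicates are one-liners. A companion predicate for the unsigned 8-bit payload would read as follows; the name IsQAsymmU8 is illustrative and does not appear in the patch:

    // Sketch only: same style as IsQSymm16 above.
    bool IsQAsymmU8(const WorkloadInfo& info)
    {
        return IsDataType<DataType::QAsymmU8>(info);
    }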
index 580d8550f0e6b763a334d870a6da32354b1610ff..23a8e9b9e92a2e253f9520d832ea87512d10da8b 100644
@@ -68,7 +68,7 @@ BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
 
 BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
 {
-    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
 }
 
 template <typename WorkloadType,
@@ -101,7 +101,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
         AdditionQueueDescriptor,
         AdditionLayer,
-        armnn::DataType::QuantisedAsymm8>();
+        armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
@@ -109,7 +109,7 @@ BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
         AdditionQueueDescriptor,
         AdditionLayer,
-        armnn::DataType::QuantisedSymm16>();
+        armnn::DataType::QSymmS16>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
@@ -133,7 +133,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
         SubtractionQueueDescriptor,
         SubtractionLayer,
-        armnn::DataType::QuantisedAsymm8>();
+        armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
@@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
         SubtractionQueueDescriptor,
         SubtractionLayer,
-        armnn::DataType::QuantisedSymm16>();
+        armnn::DataType::QSymmS16>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
@@ -157,7 +157,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
         MultiplicationQueueDescriptor,
         MultiplicationLayer,
-        armnn::DataType::QuantisedAsymm8>();
+        armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
@@ -165,7 +165,7 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
         MultiplicationQueueDescriptor,
         MultiplicationLayer,
-        armnn::DataType::QuantisedSymm16>();
+        armnn::DataType::QSymmS16>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
@@ -189,7 +189,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
         DivisionQueueDescriptor,
         DivisionLayer,
-        armnn::DataType::QuantisedAsymm8>();
+        armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
@@ -197,7 +197,7 @@ BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
         DivisionQueueDescriptor,
         DivisionLayer,
-        armnn::DataType::QuantisedSymm16>();
+        armnn::DataType::QSymmS16>();
 }
 
 template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
@@ -255,25 +255,25 @@ BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
 
 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
 {
-    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedAsymm8>
+    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
             (DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
 {
-    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedAsymm8>
+    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
             (DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
 {
-    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
             (DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
 {
-    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
             (DataLayout::NHWC);
 }
 
@@ -358,8 +358,8 @@ static void RefCreateFullyConnectedWorkloadTest()
     auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
 
     // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
-    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
-    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+    float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
     CheckInputOutput(std::move(workload),
         TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
         TensorInfo({ 3, 7 }, DataType, outputQScale));
@@ -372,12 +372,12 @@ BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
 
 BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
 {
-    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
 {
-    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedSymm16>();
+    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
 }
 
 template <typename NormalizationWorkloadType, armnn::DataType DataType>
@@ -419,22 +419,22 @@ BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
 
 BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
 {
-    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
 {
-    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
 {
-    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
 {
-    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
+    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
 }
 
 template <typename Pooling2dWorkloadType, armnn::DataType DataType>
@@ -477,22 +477,22 @@ BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
 
 BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
 {
-    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
 {
-    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
 {
-    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
 {
-    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
+    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
 }
 
 template <typename SoftmaxWorkloadType, armnn::DataType DataType>
@@ -521,12 +521,12 @@ BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
 {
-    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
 {
-    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedSymm16>();
+    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
 }
 
 template <typename SplitterWorkloadType, armnn::DataType DataType>
@@ -563,7 +563,7 @@ BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
 {
-    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
 }
 
 template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
@@ -611,7 +611,7 @@ BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
 
 BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
 {
-    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QuantisedAsymm8>();
+    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
 }
 
 template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
@@ -661,7 +661,7 @@ BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
 {
     RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
-        armnn::DataType::QuantisedAsymm8>();
+        armnn::DataType::QAsymmU8>();
 }
 
 template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
@@ -704,12 +704,12 @@ BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
 
 BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
 {
-    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
 {
-    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
@@ -744,12 +744,12 @@ BOOST_AUTO_TEST_CASE(CreateRsqrtFloat16)
 
 BOOST_AUTO_TEST_CASE(CreateRsqrtUint8)
 {
-    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateRsqrtQsymm16)
 {
-    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QuantisedSymm16>();
+    RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QSymmS16>();
 }
 
 template <typename BatchToSpaceNdWorkloadType, armnn::DataType DataType>
@@ -777,12 +777,12 @@ BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
 
 BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
 {
-    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
 {
-    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QuantisedSymm16>();
+    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
 }
 
 template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
@@ -825,22 +825,22 @@ BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
 
 BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
 {
-    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
 {
-    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
+    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
 {
-    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
 {
-    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
 
 template <typename ReshapeWorkloadType, armnn::DataType DataType>
@@ -864,12 +864,12 @@ BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
 
 BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
 {
-    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
 {
-    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedSymm16>();
+    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
 }
 
 template <typename ConcatWorkloadType, armnn::DataType DataType>
@@ -898,12 +898,12 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
 {
-    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
 }
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
 {
-    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
 }
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
@@ -913,7 +913,7 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
 {
-    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
 }
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
@@ -923,7 +923,7 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
 {
-    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
 }
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
@@ -933,7 +933,7 @@ BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
 {
-    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
 }
 
 template <typename ConstantWorkloadType, armnn::DataType DataType>
@@ -951,12 +951,12 @@ static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
 
 BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
 {
-    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 });
+    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
 }
 
 BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
 {
-    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedSymm16>({ 2, 3, 2, 10 });
+    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
 }
 
 BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
@@ -1001,12 +1001,12 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
 
 BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
 {
-    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedAsymm8);
+    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
 }
 
 BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
 {
-    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedSymm16);
+    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
 }
 
 BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
@@ -1026,14 +1026,14 @@ BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
 BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
 {
     BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
-                                                 armnn::DataType::QuantisedAsymm8),
+                                                 armnn::DataType::QAsymmU8),
                       armnn::InvalidArgumentException);
 }
 
 BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
 {
     BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
-                                                 armnn::DataType::QuantisedSymm16),
+                                                 armnn::DataType::QSymmS16),
                       armnn::InvalidArgumentException);
 }
 
@@ -1062,12 +1062,12 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
 
 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
 {
-    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
 }
 
 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
 {
-    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
 }
 
 template <armnn::DataType DataType>
@@ -1103,12 +1103,12 @@ BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
 
 BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
 {
-    RefCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+    RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
 BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
 {
-    RefCreateStackWorkloadTest<armnn::DataType::QuantisedSymm16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+    RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
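All of the create-workload tests above bottom out in TensorInfo objects whose DataType names the quantized payload, with scale and offset carried alongside. A usage sketch with the renamed enumerators; the shape, scale, and offset values here are illustrative only:

    #include <armnn/Tensor.hpp>

    // TensorInfo(shape, dataType, quantizationScale, quantizationOffset)
    armnn::TensorInfo u8Info ({ 1, 2, 2, 3 }, armnn::DataType::QAsymmU8, 1.0f / 255.0f, 128); // was QuantisedAsymm8
    armnn::TensorInfo s16Info({ 1, 2, 2, 3 }, armnn::DataType::QSymmS16, 1.0f / 32768.0f, 0); // was QuantisedSymm16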
index 4d8c82d900f0fbda25820962f9b87eb67ad5e304..75eccdee88630b305bf68f2be345a162bc378bcf 100644
@@ -37,12 +37,12 @@ BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32)
 
 BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8)
 {
-    AbsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    AbsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16)
 {
-    AbsEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
+    AbsEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 // Constant
@@ -75,12 +75,12 @@ BOOST_AUTO_TEST_CASE(Unsigned8)
     softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
     // Sets the tensors in the network.
-    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
     inputTensorInfo.SetQuantizationOffset(100);
     inputTensorInfo.SetQuantizationScale(10000.0f);
     input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
 
-    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
     outputTensorInfo.SetQuantizationOffset(0);
     outputTensorInfo.SetQuantizationScale(1.0f/255.0f);
     softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -369,7 +369,7 @@ BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
                                                 0, 0, 0, 0,  1, 1, 1, 1 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                                ComparisonOperation::Equal,
                                                                expectedOutput);
 }
@@ -379,7 +379,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                                ComparisonOperation::Greater,
                                                                expectedOutput);
 }
@@ -409,7 +409,7 @@ BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
     const std::vector<uint8_t > expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                  0, 0, 0, 0, 0, 0 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                                   ComparisonOperation::Equal,
                                                                   expectedOutput);
 }
@@ -419,7 +419,7 @@ BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                                   ComparisonOperation::Greater,
                                                                   expectedOutput);
 }
@@ -431,12 +431,12 @@ BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NHWCTest)
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NHWCTest)
 {
-    BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+    BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NHWCTest)
 {
-    BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+    BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NCHWTest)
@@ -446,12 +446,12 @@ BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NCHWTest)
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NCHWTest)
 {
-    BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+    BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NCHWTest)
 {
-    BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+    BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest)
@@ -461,12 +461,12 @@ BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest)
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NHWCTest)
 {
-    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest)
 {
-    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest)
@@ -476,12 +476,12 @@ BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest)
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NCHWTest)
 {
-    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest)
 {
-    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
@@ -491,7 +491,7 @@ BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
 
 BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test)
 {
-    ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
@@ -501,7 +501,7 @@ BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
 
 BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test)
 {
-    ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
@@ -511,7 +511,7 @@ BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
 
 BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test)
 {
-    ConcatDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
@@ -521,7 +521,7 @@ BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
 
 BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test)
 {
-    ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
@@ -531,12 +531,12 @@ BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
 
 BOOST_AUTO_TEST_CASE(RefGatherUint8Test)
 {
-    GatherEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    GatherEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefGatherInt16Test)
 {
-    GatherEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
+    GatherEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefGatherMultiDimFloatTest)
@@ -546,12 +546,12 @@ BOOST_AUTO_TEST_CASE(RefGatherMultiDimFloatTest)
 
 BOOST_AUTO_TEST_CASE(RefGatherMultiDimUint8Test)
 {
-    GatherMultiDimEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    GatherMultiDimEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefGatherMultiDimInt16Test)
 {
-    GatherMultiDimEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
+    GatherMultiDimEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 // DepthToSpace
@@ -567,12 +567,12 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
@@ -587,33 +587,33 @@ BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Dequantize
 BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
 {
-    DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
 {
-    DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleInt16Test)
 {
-    DequantizeEndToEndSimple<armnn::DataType::QuantisedSymm16>(defaultBackends);
+    DequantizeEndToEndSimple<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetInt16Test)
 {
-    DequantizeEndToEndOffset<armnn::DataType::QuantisedSymm16>(defaultBackends);
+    DequantizeEndToEndOffset<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsTest)
@@ -697,7 +697,7 @@ BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsUint8Test)
     QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
     QuantizeData(qScores.data(), scores.data(), scoresInfo);
     QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
-    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
                                                                              qScores, qAnchors,
                                                                              1.0f, 1, 0.01f, 0, 0.5f, 0);
 }
@@ -775,7 +775,7 @@ BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
     QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
     QuantizeData(qScores.data(), scores.data(), scoresInfo);
     QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
-    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
                                                                           qScores, qAnchors,
                                                                           1.0f, 1, 0.01f, 0, 0.5f, 0);
 }
@@ -793,12 +793,12 @@ BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestFloat32)
 
 BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestUint8)
 {
-    PreluEndToEndPositiveTest<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestQSymm16)
 {
-    PreluEndToEndPositiveTest<armnn::DataType::QuantisedSymm16>(defaultBackends);
+    PreluEndToEndPositiveTest<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSpaceToDepthNhwcEndToEndTest1)
@@ -829,7 +829,7 @@ BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndTest)
 
 BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndUint8Test)
 {
-    Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndTest)
@@ -844,12 +844,12 @@ BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndTest)
 
 BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndUint8Test)
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndUint8Test)
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndTest)
@@ -869,17 +869,17 @@ BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndTest)
 
 BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndUint8Test)
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndUint8Test)
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndUint8Test)
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndTest)
@@ -904,22 +904,22 @@ BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndTest)
 
 BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndUint8Test)
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndUint8Test)
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndUint8Test)
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndUint8Test)
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 // TransposeConvolution2d
@@ -931,13 +931,13 @@ BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNchwTest)
 
 BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NchwTest)
 {
-    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NchwTest)
 {
-    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+    TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
@@ -949,13 +949,13 @@ BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNhwcTest)
 
 BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NhwcTest)
 {
-    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NhwcTest)
 {
-    TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+    TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
 
@@ -967,12 +967,12 @@ BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNchwTest)
 
 BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NchwTest)
 {
-    ResizeBilinearEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+    ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NchwTest)
 {
-    ResizeBilinearEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+    ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNhwcTest)
@@ -982,12 +982,12 @@ BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNhwcTest)
 
 BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NhwcTest)
 {
-    ResizeBilinearEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+    ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NhwcTest)
 {
-    ResizeBilinearEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+    ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Resize NearestNeighbor
@@ -998,12 +998,12 @@ BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNchwTest)
 
 BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NchwTest)
 {
-    ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+    ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NchwTest)
 {
-    ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+    ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
 BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNhwcTest)
@@ -1013,12 +1013,12 @@ BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNhwcTest)
 
 BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NhwcTest)
 {
-    ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+    ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest)
 {
-    ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+    ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // InstanceNormalization
@@ -1050,7 +1050,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxSimpleTest)
 
 BOOST_AUTO_TEST_CASE(RefArgMaxSimpleUint8Test)
 {
-    ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
@@ -1060,7 +1060,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
 
 BOOST_AUTO_TEST_CASE(RefArgMinSimpleUint8Test)
 {
-    ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
@@ -1070,7 +1070,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
 
 BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Uint8Test)
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
@@ -1081,7 +1081,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
 BOOST_AUTO_TEST_CASE(RefArgMinAxis0Uint8Test)
 {
 
-    ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
@@ -1091,7 +1091,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
 
 BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Uint8Test)
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
@@ -1102,7 +1102,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
 BOOST_AUTO_TEST_CASE(RefArgMinAxis1Uint8Test)
 {
 
-    ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
@@ -1112,7 +1112,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
 
 BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Uint8Test)
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
@@ -1123,7 +1123,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
 BOOST_AUTO_TEST_CASE(RefArgMinAxis2Uint8Test)
 {
 
-    ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
@@ -1133,7 +1133,7 @@ BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
 
 BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Uint8Test)
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
@@ -1144,7 +1144,7 @@ BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
 BOOST_AUTO_TEST_CASE(RefArgMinAxis3Uint8Test)
 {
 
-    ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 #if !defined(__ANDROID__)
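Each end-to-end case above instantiates one function template per payload type, with armnn::DataType as a non-type template parameter, which is why the rename only touches call sites. A schematic sketch, assuming armnn::ResolveType from src/armnn/ResolveType.hpp; RunEndToEnd itself is hypothetical:

    #include <vector>
    #include <armnn/Types.hpp>

    // ResolveType maps the enum to the C++ payload type:
    // QAsymmU8 -> uint8_t, QSymmS16 -> int16_t.
    template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
    void RunEndToEnd(const std::vector<armnn::BackendId>& backends)
    {
        std::vector<T> payload(16); // element type follows the enum
        // ... build the network, quantize 'payload', run inference on 'backends' ...
    }

    // Call sites then read exactly like the tests above:
    //   RunEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
    //   RunEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);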
index 106a7f615719a43053ce92ed71693eb88c2206a9..ab0809d90bf23e1cb566c8fb19b617600311a76b 100644
@@ -63,13 +63,13 @@ BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference)
 BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference)
 {
     armnn::RefWorkloadFactory factory;
-    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
 }
 
 BOOST_AUTO_TEST_CASE(IsLayerSupportedInt16Reference)
 {
     armnn::RefWorkloadFactory factory;
-    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QuantisedSymm16>(&factory);
+    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS16>(&factory);
 }
 
 BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference)
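The two reference-backend checks above differ only in the DataType template argument to IsLayerSupportedTests, so extending coverage to another payload is a single extra case. A hypothetical instantiation for the signed 8-bit type that appears elsewhere in this patch (QSymmS8); it is not part of the commit:

    BOOST_AUTO_TEST_CASE(IsLayerSupportedQSymmS8Reference) // hypothetical
    {
        armnn::RefWorkloadFactory factory;
        IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
    }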
index b88f432acfc099911e98a1c1debf2e0776667c3f..b0d8db802ef69f5d91b9396ce01fc7b2058d004b 100644
@@ -79,19 +79,19 @@ ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int16,
-                     Convolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     Convolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt16,
-                     Convolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     Convolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 
@@ -104,19 +104,19 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int16,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt16,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     Convolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 
@@ -129,19 +129,19 @@ ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int16,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt16,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 
@@ -180,19 +180,19 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8,
-                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcUint8,
-                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int16,
-                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt16,
-                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 
@@ -205,19 +205,19 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Nhwc,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
-                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcUint8,
-                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int16,
-                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt16,
-                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
                      false,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4,
@@ -465,11 +465,11 @@ ARMNN_AUTO_TEST_CASE(TanhInt16, TanhInt16Test)
 
 // Fully Connected
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedQSymm16, FullyConnectedTest<DataType::QuantisedSymm16>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedQSymm16, FullyConnectedTest<DataType::QSymmS16>, false)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedQSymm16, FullyConnectedTest<DataType::QuantisedSymm16>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedQSymm16, FullyConnectedTest<DataType::QSymmS16>, true)
 ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
 
 ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
@@ -493,9 +493,9 @@ ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
 ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentQParams, ConcatUint8DifferentQParamsTest)
 ARMNN_AUTO_TEST_CASE(ConcatUint16, ConcatUint16Test)
 ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
-                     ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>, true)
+                     ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, true)
 ARMNN_AUTO_TEST_CASE(ConcatInt16DifferentInputOutputQParam,
-                     ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>, true)
+                     ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>, true)
 
 // Add
 ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
@@ -681,10 +681,10 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearFloat16,
                      SimpleResizeBilinearTest<DataType::Float16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
-                     SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeBilinearTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16,
-                     SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+                     SimpleResizeBilinearTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
                      ResizeBilinearNopTest<DataType::Float32>,
@@ -693,10 +693,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopFloat16,
                      ResizeBilinearNopTest<DataType::Float16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
-                     ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearNopTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint16,
-                     SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+                     SimpleResizeBilinearTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
                      ResizeBilinearSqMinTest<DataType::Float32>,
@@ -705,10 +705,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinFloat16,
                      ResizeBilinearSqMinTest<DataType::Float16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
-                     ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16,
-                     SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+                     SimpleResizeBilinearTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
                      ResizeBilinearMinTest<DataType::Float32>,
@@ -717,10 +717,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinFloat16,
                      ResizeBilinearMinTest<DataType::Float16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
-                     ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16,
-                     SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+                     SimpleResizeBilinearTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMag,
                      ResizeBilinearMagTest<DataType::Float32>,
@@ -729,10 +729,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagFloat16,
                      ResizeBilinearMagTest<DataType::Float16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
-                     ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMagTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16,
-                     SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+                     SimpleResizeBilinearTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
 
 // Resize Bilinear - NHWC
@@ -743,10 +743,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwcFloat16,
                      ResizeBilinearNopTest<DataType::Float16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
-                     ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearNopTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint16Nhwc,
-                     ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+                     ResizeBilinearNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
                      SimpleResizeBilinearTest<DataType::Float32>,
@@ -755,10 +755,10 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwcFloat16,
                      SimpleResizeBilinearTest<DataType::Float16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
-                     SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeBilinearTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16Nhwc,
-                     ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+                     ResizeBilinearNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
                      ResizeBilinearSqMinTest<DataType::Float32>,
@@ -767,10 +767,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwcFloat16,
                      ResizeBilinearSqMinTest<DataType::Float16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
-                     ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16Nhwc,
-                     ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+                     ResizeBilinearNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
                      ResizeBilinearMinTest<DataType::Float32>,
@@ -779,10 +779,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwcFloat16,
                      ResizeBilinearMinTest<DataType::Float16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
-                     ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16Nhwc,
-                     ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+                     ResizeBilinearNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
                      ResizeBilinearMagTest<DataType::Float32>,
@@ -791,10 +791,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwcFloat16,
                      ResizeBilinearMagTest<DataType::Float16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
-                     ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+                     ResizeBilinearMagTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16Nhwc,
-                     ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+                     ResizeBilinearNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 
 // Resize NearestNeighbor - NCHW
@@ -802,46 +802,46 @@ ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
                      SimpleResizeNearestNeighborTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+                     SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
                      ResizeNearestNeighborNopTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+                     SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
                      ResizeNearestNeighborSqMinTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
-                     ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+                     SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
                      ResizeNearestNeighborMinTest<DataType::Float32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
-                     ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+                     SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
                      ResizeNearestNeighborMagTest<DataType::Float32>,
                      DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
-                     ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
                      DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+                     SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
                      DataLayout::NCHW)
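
The float and Uint8 Mag variants above pass two quantization parameter pairs, assumed here to be input scale/offset (0.10f, 50) and output scale/offset (0.11f, 20), so they exercise requantization between differently quantized tensors rather than a single shared scale. A sketch of that round trip for QAsymmU8 values:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Decode with the input parameters, re-encode with the output ones,
    // clamping to the unsigned 8-bit payload range the new name advertises.
    inline uint8_t Requantize(uint8_t q, float inScale, int32_t inOffset,
                              float outScale, int32_t outOffset)
    {
        float real  = inScale * static_cast<float>(static_cast<int32_t>(q) - inOffset);
        int32_t out = static_cast<int32_t>(std::round(real / outScale)) + outOffset;
        return static_cast<uint8_t>(std::min(255, std::max(0, out)));
    }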
 
 // Resize NearestNeighbor - NHWC
@@ -849,46 +849,46 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
                      ResizeNearestNeighborNopTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+                     ResizeNearestNeighborNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
                      SimpleResizeNearestNeighborTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
-                     SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+                     SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+                     ResizeNearestNeighborNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
                      ResizeNearestNeighborSqMinTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
-                     ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+                     ResizeNearestNeighborNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
                      ResizeNearestNeighborMinTest<DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
-                     ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+                     ResizeNearestNeighborNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
                      ResizeNearestNeighborMagTest<DataType::Float32>,
                      DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
-                     ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+                     ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
                      DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
 ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16Nhwc,
-                     ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+                     ResizeNearestNeighborNopTest<DataType::QSymmS16>,
                      DataLayout::NHWC)
 
 // Fake Quantization
@@ -952,10 +952,10 @@ ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
 ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
 ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
 
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
 
 // Constant
 ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
@@ -1011,12 +1011,12 @@ ARMNN_AUTO_TEST_CASE(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test,
 // Floor
 ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(SimpleFloorFloat16, SimpleFloorTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QSymmS16>)
 
 // Reshape
 ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<DataType::QSymmS16>)
 ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>)
 
 // Rsqrt
@@ -1026,24 +1026,24 @@ ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(Rsqrt2dFloat16, Rsqrt2dTest<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(Rsqrt3dFloat16, Rsqrt3dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedSymm16, Rsqrt3dTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedSymm16, Rsqrt3dTest<DataType::QSymmS16>)
 
 // Permute
 ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQSymm16, SimplePermuteTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQSymm16, SimplePermuteTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet1Test, PermuteValueSet1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet2Test, PermuteValueSet2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet3Test, PermuteValueSet3Test<DataType::QSymmS16>)
 
 // Lstm
 BOOST_AUTO_TEST_CASE(LstmUtilsZeroVector) {
@@ -1090,21 +1090,21 @@ ARMNN_AUTO_TEST_CASE(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QuantisedAsymm8>)
-
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedSymm16, MeanSimpleTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedSymm16, MeanSimpleAxisTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedSymm16, MeanKeepDimsTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedSymm16, MeanMultipleDimsTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedSymm16, MeanVts1Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedSymm16, MeanVts2Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedSymm16, MeanVts3Test<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedSymm16, MeanSimpleTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedSymm16, MeanSimpleAxisTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedSymm16, MeanKeepDimsTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedSymm16, MeanMultipleDimsTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedSymm16, MeanVts1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedSymm16, MeanVts2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedSymm16, MeanVts3Test<DataType::QSymmS16>)
 
 ARMNN_AUTO_TEST_CASE(AdditionAfterMaxPool, AdditionAfterMaxPoolTest)
 
@@ -1123,15 +1123,15 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelSigned32, ArgMaxChannelTest<DataType::Signed32
 ARMNN_AUTO_TEST_CASE(ArgMaxHeightSigned32, ArgMaxHeightTest<DataType::Signed32>)
 ARMNN_AUTO_TEST_CASE(ArgMinWidthSigned32, ArgMinWidthTest<DataType::Signed32>)
 
-ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymm8, ArgMaxChannelTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>)
 
-ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedSymm16, ArgMaxSimpleTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedSymm16, ArgMinSimpleTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedSymm16, ArgMinChannelTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedSymm16, ArgMaxChannelTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedSymm16, ArgMaxSimpleTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedSymm16, ArgMinSimpleTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedSymm16, ArgMinChannelTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedSymm16, ArgMaxChannelTest<DataType::QSymmS16>)
 
 // Space To Batch Nd
 ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleFloat32, SpaceToBatchNdSimpleFloat32Test)
@@ -1191,21 +1191,21 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_5, BatchToSpaceNdNhwcTest5<DataTy
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_6, BatchToSpaceNdNhwcTest6<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7<DataType::Float16>)
 
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1,  BatchToSpaceNdNhwcTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2,  BatchToSpaceNdNhwcTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3,  BatchToSpaceNdNhwcTest3<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint4,  BatchToSpaceNdNhwcTest4<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint5,  BatchToSpaceNdNhwcTest5<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint6,  BatchToSpaceNdNhwcTest6<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint7,  BatchToSpaceNdNhwcTest7<DataType::QuantisedAsymm8>)
-
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_1,  BatchToSpaceNdNhwcTest1<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_2,  BatchToSpaceNdNhwcTest2<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_3,  BatchToSpaceNdNhwcTest3<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_4,  BatchToSpaceNdNhwcTest4<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_5,  BatchToSpaceNdNhwcTest5<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_6,  BatchToSpaceNdNhwcTest6<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_7,  BatchToSpaceNdNhwcTest7<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1,  BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2,  BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3,  BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint4,  BatchToSpaceNdNhwcTest4<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint5,  BatchToSpaceNdNhwcTest5<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint6,  BatchToSpaceNdNhwcTest6<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint7,  BatchToSpaceNdNhwcTest7<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_1,  BatchToSpaceNdNhwcTest1<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_2,  BatchToSpaceNdNhwcTest2<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_3,  BatchToSpaceNdNhwcTest3<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_4,  BatchToSpaceNdNhwcTest4<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_5,  BatchToSpaceNdNhwcTest5<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_6,  BatchToSpaceNdNhwcTest6<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_7,  BatchToSpaceNdNhwcTest7<DataType::QSymmS16>)
 
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_1, BatchToSpaceNdNchwTest1<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_2, BatchToSpaceNdNchwTest2<DataType::Float16>)
@@ -1215,21 +1215,21 @@ ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_5, BatchToSpaceNdNchwTest5<DataTy
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_6, BatchToSpaceNdNchwTest6<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_7, BatchToSpaceNdNchwTest7<DataType::Float16>)
 
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1,  BatchToSpaceNdNchwTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2,  BatchToSpaceNdNchwTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3,  BatchToSpaceNdNchwTest3<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint4,  BatchToSpaceNdNchwTest4<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint5,  BatchToSpaceNdNchwTest5<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint6,  BatchToSpaceNdNchwTest6<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint7,  BatchToSpaceNdNchwTest7<DataType::QuantisedAsymm8>)
-
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_1,  BatchToSpaceNdNchwTest1<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_2,  BatchToSpaceNdNchwTest2<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_3,  BatchToSpaceNdNchwTest3<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_4,  BatchToSpaceNdNchwTest4<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_5,  BatchToSpaceNdNchwTest5<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_6,  BatchToSpaceNdNchwTest6<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_7,  BatchToSpaceNdNchwTest7<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1,  BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2,  BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3,  BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint4,  BatchToSpaceNdNchwTest4<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint5,  BatchToSpaceNdNchwTest5<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint6,  BatchToSpaceNdNchwTest6<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint7,  BatchToSpaceNdNchwTest7<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_1,  BatchToSpaceNdNchwTest1<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_2,  BatchToSpaceNdNchwTest2<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_3,  BatchToSpaceNdNchwTest3<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_4,  BatchToSpaceNdNchwTest4<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_5,  BatchToSpaceNdNchwTest5<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_6,  BatchToSpaceNdNchwTest6<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_7,  BatchToSpaceNdNchwTest7<DataType::QSymmS16>)
 
 // DepthToSpace
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
@@ -1242,15 +1242,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Floa
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
 
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
@@ -1262,15 +1262,15 @@ ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Floa
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
 ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
 
 // SpaceToDepth
 ARMNN_AUTO_TEST_CASE(SpaceToDepthNchwAsymmQ8, SpaceToDepthNchwAsymmQ8Test)
@@ -1381,10 +1381,10 @@ ARMNN_AUTO_TEST_CASE(Abs3d, Abs3dTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(AbsZero, AbsZeroTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(Abs2dFloat16, Abs2dTest<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(Abs3dFloat16, Abs3dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(Abs3dQuantisedSymm16, Abs3dTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(Abs3dQuantisedSymm16, Abs3dTest<DataType::QSymmS16>)
 
 // Detection PostProcess
 BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsFloat)
@@ -1398,22 +1398,22 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat)
 BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8)
 {
     DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QuantisedAsymm8>();
+        RefWorkloadFactory, DataType::QAsymmU8>();
 }
 BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsUint8)
 {
     DetectionPostProcessFastNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QuantisedAsymm8>();
+        RefWorkloadFactory, DataType::QAsymmU8>();
 }
 BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt16)
 {
     DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QuantisedSymm16>();
+        RefWorkloadFactory, DataType::QSymmS16>();
 }
 BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
 {
     DetectionPostProcessFastNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QuantisedSymm16>();
+        RefWorkloadFactory, DataType::QSymmS16>();
 }
 
 // Dequantize
@@ -1434,8 +1434,8 @@ ARMNN_AUTO_TEST_CASE(QuantizeClampInt16, QuantizeClampInt16Test)
 // PReLU
 ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(PreluFloat16, PreluTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(PreluUint8,   PreluTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PreluInt16,   PreluTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(PreluUint8,   PreluTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PreluInt16,   PreluTest<DataType::QSymmS16>)
 
 // Slice
 ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
@@ -1463,19 +1463,19 @@ ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nhwc,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt16Nchw,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt16Nhwc,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 
@@ -1488,19 +1488,19 @@ ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt16Nchw,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt16Nhwc,
-                     SimpleTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 
@@ -1513,19 +1513,19 @@ ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nhwc,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt16Nchw,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt16Nhwc,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 
@@ -1538,19 +1538,19 @@ ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt16Nchw,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt16Nhwc,
-                     PaddedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 
@@ -1563,19 +1563,19 @@ ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nhwc,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt16Nchw,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt16Nhwc,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 
@@ -1588,19 +1588,19 @@ ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dFloatNhwc,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      true,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt16Nchw,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt16Nhwc,
-                     StridedTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      true,
                      DataLayout::NCHW)
 
@@ -1611,16 +1611,16 @@ ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dFloatNhwc,
                      MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
-                     MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nhwc,
-                     MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                      DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt16Nchw,
-                     MultiChannelTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     MultiChannelTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt16Nhwc,
-                     MultiChannelTransposeConvolution2dTest<DataType::QuantisedSymm16, DataType::Signed32>,
+                     MultiChannelTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                      DataLayout::NCHW)
 
 ARMNN_AUTO_TEST_CASE(TransposeConvolution2dPerAxisQuantTestNchw,
index 9d41c9e9e7ac77be8e8440ccc49d256b9193f62e..faabdcdb3f6d120fce7bac20d90b395c450a2534 100644 (file)
@@ -79,14 +79,14 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
                 params.second,
                 params.first);
         }
-        case DataType::QuantisedAsymm8:
+        case DataType::QAsymmU8:
         {
             return std::make_unique<QASymm8Decoder>(
                 static_cast<const uint8_t*>(data),
                 info.GetQuantizationScale(),
                 info.GetQuantizationOffset());
         }
-        case DataType::QuantisedSymm16:
+        case DataType::QSymmS16:
         {
             return std::make_unique<QSymm16Decoder>(
                 static_cast<const int16_t*>(data),
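
The decoder factory above keys the same switch off the renamed enumerators, and the pointer casts line up with the payloads the new names advertise: const uint8_t* for QAsymmU8, const int16_t* for QSymmS16. If the old spellings survive as deprecated aliases (the assumption sketched earlier), callers still using them keep hitting the same case labels, which could be asserted like so:

    #include <armnn/Types.hpp>

    // Holds only under the deprecated-alias assumption; if the old
    // enumerators were removed outright, this would simply not compile.
    static_assert(armnn::DataType::QuantisedAsymm8 == armnn::DataType::QAsymmU8,
                  "old and new spellings must name the same enumerator");
    static_assert(armnn::DataType::QuantisedSymm16 == armnn::DataType::QSymmS16,
                  "old and new spellings must name the same enumerator");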
index 92493ed6417996b8bd2160a8317794af1ded83be..4fe202f0bf4e58078558009f57ed71bd3bc79e0d 100644 (file)
@@ -30,7 +30,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
                 params.second,
                 params.first);
         }
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         {
             return std::make_unique<QASymm8Encoder>(
                 static_cast<uint8_t*>(data),
@@ -44,7 +44,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
                     info.GetQuantizationScale(),
                     info.GetQuantizationOffset());
         }
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
         {
             return std::make_unique<QSymm16Encoder>(
                 static_cast<int16_t*>(data),
index 2a3883f8f747d20b45c9b2e9c174f38c899ac394..9572f9a2a5921353a1f086d013da4cf31852bece 100644 (file)
@@ -46,7 +46,7 @@ void RefDebugWorkload<DataType>::RegisterDebugCallback(const DebugCallbackFuncti
 
 template class RefDebugWorkload<DataType::Float16>;
 template class RefDebugWorkload<DataType::Float32>;
-template class RefDebugWorkload<DataType::QuantisedAsymm8>;
-template class RefDebugWorkload<DataType::QuantisedSymm16>;
+template class RefDebugWorkload<DataType::QAsymmU8>;
+template class RefDebugWorkload<DataType::QSymmS16>;
 
 } // namespace armnn
index 0964515b2c74cfbd3f4fd0ef8777bb3f0825db01..fc154e94573f35bf1dc9d3ee141571d843af28ea 100644 (file)
@@ -39,7 +39,7 @@ private:
 
 using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
 using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
-using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QuantisedAsymm8>;
-using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QuantisedSymm16>;
+using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QAsymmU8>;
+using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QSymmS16>;
 
 } // namespace armnn
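
The reference debug workload follows a two-line-per-type convention that the rename touches in both files: an explicit template instantiation in the .cpp and a readable alias in the .hpp. Supporting a further data type would repeat the same pattern; QSymmS8 below is purely illustrative, not part of this patch:

    template class RefDebugWorkload<DataType::QSymmS8>;                   // .cpp
    using RefDebugQSymmS8Workload = RefDebugWorkload<DataType::QSymmS8>;  // .hpp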
index c4b9daeb4c0acfd3ccea120eaa90a5a6836289bd..356f6b11720ecdfb2a9d1e665316b16b35cd2d21 100644 (file)
@@ -35,7 +35,7 @@ void RefPadWorkload<DataType>::Execute() const
 
 template class RefPadWorkload<DataType::Float32>;
 template class RefPadWorkload<DataType::Float16>;
-template class RefPadWorkload<DataType::QuantisedAsymm8>;
-template class RefPadWorkload<DataType::QuantisedSymm16>;
+template class RefPadWorkload<DataType::QAsymmU8>;
+template class RefPadWorkload<DataType::QSymmS16>;
 
 } //namespace armnn
\ No newline at end of file
index d1521f4f8d767c8d5e30f2d085a7474872f7a069..28fb55386e0f795a9e99594338e6bea5a8687f41 100644 (file)
@@ -32,7 +32,7 @@ public:
 
 using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
 using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>;
-using RefPadQAsymm8Workload = RefPadWorkload<DataType::QuantisedAsymm8>;
-using RefPadQSymm16Workload = RefPadWorkload<DataType::QuantisedSymm16>;
+using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>;
+using RefPadQSymm16Workload = RefPadWorkload<DataType::QSymmS16>;
 
 } //namespace armnn
index 4e7b76bf0a9554c1d2fbd6bedf2ca5927880d8f2..d0e1431ffdfbe0f6758386bc8008722c1e72b5e1 100644 (file)
@@ -30,7 +30,7 @@ void RefPermuteWorkload<DataType>::Execute() const
 
 template class RefPermuteWorkload<DataType::Float16>;
 template class RefPermuteWorkload<DataType::Float32>;
-template class RefPermuteWorkload<DataType::QuantisedAsymm8>;
-template class RefPermuteWorkload<DataType::QuantisedSymm16>;
+template class RefPermuteWorkload<DataType::QAsymmU8>;
+template class RefPermuteWorkload<DataType::QSymmS16>;
 
 } //namespace armnn
index 1e69afb0ed978f51c390291b26f14f1781bb1830..00a33850aadd01275a6bc3dce18dc45432f6d7b0 100644 (file)
@@ -29,7 +29,7 @@ public:
 
 using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>;
 using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>;
-using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QuantisedAsymm8>;
-using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QuantisedSymm16>;
+using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QAsymmU8>;
+using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QSymmS16>;
 
 } //namespace armnn
\ No newline at end of file
index a78804b709791db461f2dfcb60b1d7f4dbaecb87..31534abe3e9c8d05a87029671d7a783dbae6bca1 100644 (file)
@@ -43,7 +43,7 @@ void RefQuantizeWorkload::Execute() const
 
     switch(m_TargetType)
     {
-        case DataType::QuantisedAsymm8:
+        case DataType::QAsymmU8:
         {
             QuantizeImpl<uint8_t>(input, output, m_NumElements, m_Scale, m_Offset);
             break;
@@ -53,7 +53,7 @@ void RefQuantizeWorkload::Execute() const
             QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, m_Offset);
             break;
         }
-        case DataType::QuantisedSymm16:
+        case DataType::QSymmS16:
         {
             QuantizeImpl<int16_t>(input, output, m_NumElements, m_Scale, 0);
             break;
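
Note the asymmetry made explicit by the new names: the QAsymmU8 case forwards m_Offset, while the QSymmS16 case hard-codes a 0 offset, since a symmetric scheme has no zero-point shift. A self-contained sketch of the affine mapping a QuantizeImpl of this shape computes (illustrative, not the workload's actual body):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    template <typename T>
    void QuantizeSketch(const float* in, T* out, unsigned int numElements,
                        float scale, int32_t offset)
    {
        for (unsigned int i = 0; i < numElements; ++i)
        {
            // q = round(x / scale) + offset, clamped to T's representable range.
            int32_t q = static_cast<int32_t>(std::round(in[i] / scale)) + offset;
            q = std::min<int32_t>(std::numeric_limits<T>::max(),
                                  std::max<int32_t>(std::numeric_limits<T>::min(), q));
            out[i] = static_cast<T>(q);
        }
    }
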
index a690e3fecec614de4835bcdae5c2945572084c0b..81523775db74291652a827fd6e444b857b5389f8 100644 (file)
@@ -83,7 +83,7 @@ inline auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
 }
 
 template<>
-inline auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
+inline auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
                                                       const float& quantizationScale,
                                                       const int32_t& quantizationOffset)
 {
index 4e8fe78ad89ce8ab2fe220d884caafa90d02084c..9c23200802f2d579abf1c60d66a3a72490a9af28 100644 (file)
@@ -225,7 +225,7 @@ public:
         }
         else if (m_OutputType == "qasymm8")
         {
-            return armnn::DataType::QuantisedAsymm8;
+            return armnn::DataType::QAsymmU8;
         }
         else
         {
@@ -272,7 +272,7 @@ int main(int argc, char* argv[])
                 imageDataContainers.push_back(PrepareImageTensor<int>(
                     imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
                 break;
-            case armnn::DataType::QuantisedAsymm8:
+            case armnn::DataType::QAsymmU8:
                 imageDataContainers.push_back(PrepareImageTensor<uint8_t>(
                     imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
                 break;
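
This tool's two hunks show the dispatch that the rename threads through: a command-line string selects the enum value, and the enum value selects the element type of the image container; the next file repeats the same pattern. A condensed sketch of the first step (only the "qasymm8" branch is visible above; the other spellings are assumptions):

    #include <stdexcept>
    #include <string>
    #include <armnn/Types.hpp>

    armnn::DataType OutputTypeFromString(const std::string& s)
    {
        if (s == "float")   { return armnn::DataType::Float32;  }  // assumed spelling
        if (s == "int")     { return armnn::DataType::Signed32; }  // assumed spelling
        if (s == "qasymm8") { return armnn::DataType::QAsymmU8; }
        throw std::invalid_argument("Unknown output type: " + s);
    }
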
index b1cb5e36f55d23ae14843626d7149bc8940fb7c7..4793f822fbe4b1f1428cc4bf87a18ae46d073abc 100644 (file)
@@ -60,7 +60,7 @@ NormalizationParameters GetNormalizationParameters(const SupportedFrontend& mode
                 case armnn::DataType::Signed32:
                     normParams.mean = { 128.0, 128.0, 128.0 };
                     break;
-                case armnn::DataType::QuantisedAsymm8:
+                case armnn::DataType::QAsymmU8:
                 default:
                     break;
             }
index 0d7d7689e31762f8fa16103fd562067921d39c70..ecfc21209c5971065b1a1a42e8931fa210085d32 100644 (file)
@@ -322,7 +322,7 @@ int main(int argc, char* argv[])
                             inputTensorDataLayout));
                         outputDataContainers = { vector<int>(outputNumElements) };
                         break;
-                    case armnn::DataType::QuantisedAsymm8:
+                    case armnn::DataType::QAsymmU8:
                         inputDataContainers.push_back(
                             PrepareImageTensor<uint8_t>(imagePath.string(),
                             inputTensorWidth, inputTensorHeight,
index f9e9b146d46aad69a75b7007ca8c1cfa53e6b632..ff460dd85eb8ae82eed9a581543dc33282cea1db 100644 (file)
@@ -145,14 +145,14 @@ auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
 }
 
 template<>
-auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream)
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
 {
     return ParseArrayImpl<uint8_t>(stream,
                                    [](const std::string& s) { return boost::numeric_cast<uint8_t>(std::stoi(s)); });
 }
 
 template<>
-auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
                                                       const float& quantizationScale,
                                                       const int32_t& quantizationOffset)
 {
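
Unlike the first overload, which parses integer text straight into uint8_t, this one reads floating-point text and quantizes each value with the supplied scale and offset. A sketch of a body that fits the declaration, assuming ParseArrayImpl's shape from the overload above and armnn's Quantize helper from TypesUtils.hpp:

    // Hedged sketch: parse each token as float, then quantize to uint8_t.
    return ParseArrayImpl<uint8_t>(stream,
        [quantizationScale, quantizationOffset](const std::string& s)
        {
            return armnn::Quantize<uint8_t>(std::stof(s), quantizationScale, quantizationOffset);
        });
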
@@ -309,8 +309,8 @@ void PopulateTensorWithData(TContainer& tensorData,
             const int   qOffset = qParams.value().second;
 
             tensorData = readFromFile ?
-                ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile, qScale, qOffset) :
-                GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements);
+                ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
+                GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
         }
         else
         {
@@ -328,8 +328,8 @@ void PopulateTensorWithData(TContainer& tensorData,
     else if (dataTypeStr.compare("qasymm8") == 0)
     {
          tensorData = readFromFile ?
-            ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile) :
-            GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements);
+            ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
+            GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
     }
     else
     {
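
GenerateDummyTensorData is the fallback used above when no input file is supplied; its definition sits outside this diff. A plausible shape given the call sites, using ArmNN's internal ResolveType helper to map the enum to a C++ element type (hedged sketch, not the file's actual code):

    #include <vector>
    #include <armnn/Types.hpp>
    #include <ResolveType.hpp>  // internal header: maps armnn::DataType to a C++ type

    template <armnn::DataType DT>
    std::vector<armnn::ResolveType<DT>> GenerateDummyTensorDataSketch(unsigned int numElements)
    {
        // Zero-filled payload: enough to exercise a network without real input data.
        return std::vector<armnn::ResolveType<DT>>(numElements, 0);
    }
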