} //End of TEST_SUITE("Convolution2dTest_GpuAcc")
// NOTE(review): unresolved diff/patch residue — the '-'/'+' line markers below
// are not C++ and must be resolved (keep '+', drop '-') before this compiles.
-void TransposeConvUint8Test(std::vector<armnn::BackendId>& backends)
+void TransposeConvInt8Test(std::vector<armnn::BackendId>& backends)
{
// Set input data
std::vector<int32_t> transposeTensorShape { 4 };
std::vector<int32_t> outputShape { 1, 3, 3, 1 };
std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
- static std::vector<uint8_t> inputValues = { 1, 2, 3, 4 };
- std::vector<uint8_t> filterValues = { 0, 1, 2, 4 };
- std::vector<uint8_t> expectedOutputValues =
+ static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };
+ std::vector<int8_t> filterValues = { 0, 1, 2, 4 };
+ std::vector<int8_t> expectedOutputValues =
{
0, 1, 2,
2, 11, 12,
// NOTE(review): only 6 of the 9 values expected for a 1x3x3x1 output are
// present — the hunk appears truncated; TODO restore the missing row from
// the upstream test source before trusting these expectations.
};
tflite::Padding padding = tflite::Padding_VALID;
- TransposeConvTest<uint8_t>(backends,
- ::tflite::TensorType_UINT8,
+ TransposeConvTest<int8_t>(backends,
+ ::tflite::TensorType_INT8,
1, // strideX
1, // strideY
padding,
// NOTE(review): the call above breaks off mid-argument-list (shapes, filter/
// input data and expectedOutputValues are missing — their tail appears as an
// orphaned fragment further down in this file), and the next line is a stray
// statement that does not belong inside this call. TODO resolve against the
// upstream TransposeConvolution2dTest.cpp.
TransposeConvFp32Test(backends);
}
-TEST_CASE ("TransposeConv_Uint8_Test")
+TEST_CASE ("TransposeConv_Int8_Test")
{
std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
- TransposeConvUint8Test(backends);
+ TransposeConvInt8Test(backends);
}
} // End of TEST_SUITE(TransposeConv_CpuRef_Test)
// FP32 transpose-convolution on the Neon (CpuAcc) backend.
TEST_CASE ("TransposeConv_Fp32_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    TransposeConvFp32Test(backends);
}
-TEST_CASE ("TransposeConv_Uint8_Test")
+TEST_CASE ("TransposeConv_Int8_Test")
{
-std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-TransposeConvUint8Test(backends);
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ TransposeConvInt8Test(backends);
}
} // End of TEST_SUITE(TransposeConv_CpuAcc_Test)
// FP32 transpose-convolution on the OpenCL (GpuAcc) backend.
TEST_CASE ("TransposeConv_Fp32_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    TransposeConvFp32Test(backends);
}
-TEST_CASE ("TransposeConv_Uint8_Test")
+TEST_CASE ("TransposeConv_Int8_Test")
{
-std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-TransposeConvUint8Test(backends);
+ std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ TransposeConvInt8Test(backends);
}
} // End of TEST_SUITE(TransposeConv_GpuAcc_Test)
// NOTE(review): orphaned patch fragment — these two lines look like the tail of
// the TransposeConvTest(...) call (and its closing brace) belonging to
// TransposeConvInt8Test earlier in this file; the intervening arguments are
// missing. TODO reunite with that function when resolving the patch residue.
expectedOutputValues);
}
-TEST_SUITE("QuantizationTests")
+TEST_SUITE("CpuRef_QuantizationTests")
{
-// Dequantize Operator Tests
-TEST_CASE ("DEQUANTIZE_UINT8_GpuAcc_Test")
+TEST_CASE ("DEQUANTIZE_UINT8_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
DequantizeUint8Test(backends);
}
-TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- DequantizeUint8Test(backends);
-}
-TEST_CASE ("DEQUANTIZE_INT8_GpuAcc_Test")
+TEST_CASE ("DEQUANTIZE_INT8_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
DequantizeInt8Test(backends);
}
-TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- DequantizeInt8Test(backends);
-}
-TEST_CASE ("DEQUANTIZE_INT16_GpuAcc_Test")
+TEST_CASE ("DEQUANTIZE_INT16_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
DequantizeInt16Test(backends);
}
-TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- DequantizeInt16Test(backends);
-}
-// Quantize Operator Tests
-TEST_CASE ("QUANTIZE_FLOAT32_UINT8_GpuAcc_Test")
+TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
QuantizeFloat32Uint8Test(backends);
}
-TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- QuantizeFloat32Uint8Test(backends);
-}
-TEST_CASE ("QUANTIZE_FLOAT32_INT8_GpuAcc_Test")
+TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
QuantizeFloat32Int8Test(backends);
}
-TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
-{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- QuantizeFloat32Int8Test(backends);
-}
-TEST_CASE ("QUANTIZE_FLOAT32_INT16_GpuAcc_Test")
+TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuRef_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
QuantizeFloat32Int16Test(backends);
}
-TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuAcc_Test")
+
+TEST_CASE ("QUANTIZE_INT16_INT16_CpuRef_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeFloat32Int16Test(backends);
+ QuantizeInt16Int16Test(backends);
}
-TEST_CASE ("QUANTIZE_INT16_INT16_GpuAcc_Test")
+
+TEST_CASE ("QUANTIZE_INT16_INT8_CpuRef_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeInt16Int16Test(backends);
+ QuantizeInt16Int8Test(backends);
}
-TEST_CASE ("QUANTIZE_INT16_INT16_CpuAcc_Test")
+
+
+TEST_CASE ("QUANTIZE_INT8_UINT8_CpuRef_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeInt16Int16Test(backends);
+ QuantizeInt8Uint8Test(backends);
}
-TEST_CASE ("QUANTIZE_INT16_INT8_GpuAcc_Test")
+
+TEST_CASE ("QUANTIZE_UINT8_INT8_CpuRef_Test")
{
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeInt16Int8Test(backends);
+ QuantizeUint8Int8Test(backends);
+}
+
}
-TEST_CASE ("QUANTIZE_INT16_INT8_CpuAcc_Test")
+TEST_SUITE("CpuAcc_QuantizationTests")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
- QuantizeInt16Int8Test(backends);
+
+// Dequantize Operator Tests
+TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ DequantizeUint8Test(backends);
}
-TEST_CASE ("QUANTIZE_INT8_UINT8_GpuAcc_Test")
+TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- QuantizeInt8Uint8Test(backends);
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ DequantizeInt8Test(backends);
+}
+
+TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ DequantizeInt16Test(backends);
+}
+
+// Quantize Operator Tests
+TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ QuantizeFloat32Uint8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ QuantizeFloat32Int8Test(backends);
}
TEST_CASE ("QUANTIZE_INT8_UINT8_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
QuantizeInt8Uint8Test(backends);
}
-TEST_CASE ("QUANTIZE_UINT8_INT8_GpuAcc_Test")
+TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
QuantizeUint8Int8Test(backends);
}
-TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
+}
+
+TEST_SUITE("GpuAcc_QuantizationTests")
+{
+
+// Dequantize Operator Tests
+TEST_CASE ("DEQUANTIZE_UINT8_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ DequantizeUint8Test(backends);
+}
+
+TEST_CASE ("DEQUANTIZE_INT8_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ DequantizeInt8Test(backends);
+}
+
+TEST_CASE ("DEQUANTIZE_INT16_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ DequantizeInt16Test(backends);
+}
+
+// Quantize Operator Tests
+TEST_CASE ("QUANTIZE_FLOAT32_UINT8_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ QuantizeFloat32Uint8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_FLOAT32_INT8_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ QuantizeFloat32Int8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_INT8_UINT8_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ QuantizeInt8Uint8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_UINT8_INT8_GpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
QuantizeUint8Int8Test(backends);
}