type = armnn::DataType::Float32;
break;
case tflite::TensorType_INT8:
- if (tensorPtr->quantization->zero_point.size() == 1 && tensorPtr->quantization->zero_point[0] != 0)
+ if (tensorPtr->quantization->zero_point.size() == 1)
{
// Per-tensor
type = armnn::DataType::QAsymmS8;
quantizationScales,
dimensionMappings[boost::numeric_cast<unsigned int>(
tensorPtr->quantization->quantized_dimension)]);
-
return result;
}
}
tensorPtr,
tensorInfo,
permutationVector);
+ case armnn::DataType::QAsymmS8:
+ return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
+ tensorPtr,
+ tensorInfo,
+ permutationVector);
case armnn::DataType::Signed32:
return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
tensorPtr,
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f,-49152.0f }}});
}
- struct SimpleDequantizeFixtureQSymmS8 : DequantizeFixture
+ // Fixture: dequantize a [1,6] INT8 tensor — INT8 is now parsed as QAsymmS8
+ // (see the TensorType_INT8 case change in the parser hunk above).
+ struct SimpleDequantizeFixtureQAsymmS8 : DequantizeFixture
{
- SimpleDequantizeFixtureQSymmS8() : DequantizeFixture("[ 1, 6 ]",
+ SimpleDequantizeFixtureQAsymmS8() : DequantizeFixture("[ 1, 6 ]",
"[ 1, 6 ]",
"INT8") {}
};
- BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQSymmS8, SimpleDequantizeFixtureQSymmS8)
+ BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymmS8, SimpleDequantizeFixtureQAsymmS8)
{
- RunTest<2, armnn::DataType::QSymmS8 , armnn::DataType::Float32>(
+ RunTest<2, armnn::DataType::QAsymmS8 , armnn::DataType::Float32>(
0,
{{"inputTensor", { 0, 1, 5, 127, -128, -1 }}},
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}});
DataType::Float16,
DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS16,
- DataType::QSymmS8
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
{
DataType::Float32,
DataType::QAsymmU8,
- DataType::QSymmS8,
+ DataType::QAsymmS8,
DataType::QSymmS16,
DataType::Float16
};
{
DataType::Float16,
DataType::Float32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS8,
DataType::QSymmS16
};
DataType::Float16,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QAsymmS8,
DataType::QSymmS8,
DataType::QSymmS16
};
DataType::Signed32,
DataType::QSymmS16,
DataType::QAsymmS8,
- DataType::QAsymmU8,
- DataType::QSymmS8
+ DataType::QAsymmU8
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
DataType::Float16,
DataType::Float32,
DataType::Signed32,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS8,
DataType::QSymmS16
};
std::array<DataType,6> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QSymmS8,
+ // NOTE(review): QSymmS8 is removed here but the declared size stays
+ // std::array<DataType,6>; if no further entries follow below this hunk,
+ // the array should shrink to <DataType,5> to match the initializer count.
DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
std::array<DataType,6> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QSymmS8,
+ // NOTE(review): QSymmS8 is removed here but the declared size stays
+ // std::array<DataType,6>; if no further entries follow below this hunk,
+ // the array should shrink to <DataType,5> to match the initializer count.
DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
{
DataType::Float32,
DataType::Float16,
- DataType::QSymmS8,
DataType::QAsymmU8,
+ DataType::QAsymmS8,
DataType::QSymmS16
};
bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
DataType::Float32,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QAsymmS8,
DataType::QSymmS8,
DataType::QSymmS16
};
std::array<DataType,5> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QSymmS8,
+ DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
{
bool supported = true;
std::array<DataType,5> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QSymmS8,
DataType::QAsymmU8,
+ // QSymmS8 is swapped for QAsymmS8; the element count stays 5, so the
+ // declared size must remain <DataType,5> (the previous +6 left a
+ // trailing value-initialized element, i.e. DataType(0)).
+ DataType::QAsymmS8,
DataType::QSymmS16
};
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType,6> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::Float32,
DataType::Float16,
- DataType::QSymmS8,
DataType::QAsymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
DataType::Signed32,
DataType::QAsymmS8,
DataType::QAsymmU8,
- DataType::QSymmS8,
DataType::QSymmS16
};
{
DataType::Float32,
DataType::Float16,
- DataType::QSymmS8,
DataType::QAsymmU8,
+ DataType::QAsymmS8,
DataType::QSymmS16
};
return IsDataType<DataType::QSymmS8>(info);
}
+// Data-type predicate over a WorkloadInfo; delegates to
+// IsDataType<DataType::QAsymmS8> (mirrors the sibling IsQSymmS8 above).
+// Used below to select the QAsymmS8 debug workload specialization.
+bool IsQAsymmS8(const WorkloadInfo& info)
+{
+ return IsDataType<DataType::QAsymmS8>(info);
+}
+
+// Data-type predicate over a WorkloadInfo; delegates to
+// IsDataType<DataType::QAsymmU8>. Used below to select the QAsymmU8
+// debug workload specialization.
+bool IsQAsymmU8(const WorkloadInfo& info)
+{
+ return IsDataType<DataType::QAsymmU8>(info);
+}
+
RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
: m_MemoryManager(memoryManager)
{
{
return std::make_unique<RefDebugQSymmS8Workload>(descriptor, info);
}
+ if (IsQAsymmU8(info))
+ {
+ return std::make_unique<RefDebugQAsymmU8Workload>(descriptor, info);
+ }
+ if (IsQAsymmS8(info))
+ {
+ return std::make_unique<RefDebugQAsymmS8Workload>(descriptor, info);
+ }
if (IsSigned32(info))
{
return std::make_unique<RefDebugSigned32Workload>(descriptor, info);
template class RefDebugWorkload<DataType::Float16>;
template class RefDebugWorkload<DataType::Float32>;
template class RefDebugWorkload<DataType::QAsymmU8>;
+template class RefDebugWorkload<DataType::QAsymmS8>;
template class RefDebugWorkload<DataType::QSymmS16>;
template class RefDebugWorkload<DataType::QSymmS8>;
template class RefDebugWorkload<DataType::Signed32>;
using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
using RefDebugQAsymmU8Workload = RefDebugWorkload<DataType::QAsymmU8>;
+using RefDebugQAsymmS8Workload = RefDebugWorkload<DataType::QAsymmS8>;
using RefDebugQSymmS16Workload = RefDebugWorkload<DataType::QSymmS16>;
using RefDebugQSymmS8Workload = RefDebugWorkload<DataType::QSymmS8>;
using RefDebugSigned32Workload = RefDebugWorkload<DataType::Signed32>;