#include <stdint.h>

#include "BackendId.hpp"
#include "Deprecated.hpp"
#include "Exceptions.hpp"
namespace armnn
{
{
Float16 = 0,
Float32 = 1,
- QuantisedAsymm8 = 2,
+ QAsymmU8 = 2,
Signed32 = 3,
Boolean = 4,
- QuantisedSymm16 = 5,
+ QSymmS16 = 5,
QuantizedSymm8PerAxis = 6,
- QSymmS8 = 7
+ QSymmS8 = 7,
+
+ QuantisedAsymm8 ARMNN_DEPRECATED_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
+ QuantisedSymm16 ARMNN_DEPRECATED_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
};
enum class DataLayout
case DataType::Float16: return 2U;
case DataType::Float32:
case DataType::Signed32: return 4U;
- case DataType::QuantisedAsymm8: return 1U;
+ case DataType::QAsymmU8: return 1U;
case DataType::QSymmS8: return 1U;
case DataType::QuantizedSymm8PerAxis: return 1U;
- case DataType::QuantisedSymm16: return 2U;
+ case DataType::QSymmS16: return 2U;
case DataType::Boolean: return 1U;
default: return 0U;
}
{
case DataType::Float16: return "Float16";
case DataType::Float32: return "Float32";
- case DataType::QuantisedAsymm8: return "QAsymm8";
- case DataType::QSymmS8: return "QSymm8";
+ case DataType::QAsymmU8: return "QAsymmU8";
+ case DataType::QSymmS8: return "QSymmS8";
case DataType::QuantizedSymm8PerAxis: return "QSymm8PerAxis";
- case DataType::QuantisedSymm16: return "QSymm16";
+ case DataType::QSymmS16: return "QSymm16";
case DataType::Signed32: return "Signed32";
case DataType::Boolean: return "Boolean";
/// Returns true if the given DataType is one of the quantized types:
/// asymmetric unsigned 8-bit, symmetric signed 8-bit, symmetric signed
/// 16-bit, or per-axis symmetric 8-bit. Returns false otherwise.
constexpr bool IsQuantizedType(DataType dataType)
{
    return dataType == DataType::QAsymmU8 ||
           dataType == DataType::QSymmS8 ||
           dataType == DataType::QSymmS16 ||
           dataType == DataType::QuantizedSymm8PerAxis;
}
struct QuantizerOptions
{
- QuantizerOptions() : QuantizerOptions(DataType::QuantisedAsymm8, false) {}
+ QuantizerOptions() : QuantizerOptions(DataType::QAsymmU8, false) {}
QuantizerOptions(DataType activationFormat) : QuantizerOptions(activationFormat, false) {}
/// Specialization for uint8_t buffers: compatible with tensors of type
/// Boolean or QAsymmU8 (both are stored in a single unsigned byte).
template<>
inline bool CompatibleTypes<uint8_t>(DataType dataType)
{
    return dataType == DataType::Boolean || dataType == DataType::QAsymmU8;
}
template<>
/// Specialization for int16_t buffers: compatible only with QSymmS16
/// (symmetric signed 16-bit quantized) tensors.
template<>
inline bool CompatibleTypes<int16_t>(DataType dataType)
{
    return dataType == DataType::QSymmS16;
}
template<>
return float16FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
case DataType::Float32:
return float32FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
return uint8FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
case DataType::Signed32:
return int32FuncPtr(reasonIfUnsupported, std::forward<Params>(params)...);
for (unsigned int i = 0; i < numOutputs; i++) {
OutputSlot& outputSlot = layer->GetOutputSlot(i);
TensorInfo info = outputSlot.GetTensorInfo();
- if (DataType::QuantisedAsymm8 == info.GetDataType()) {
+ if (DataType::QAsymmU8 == info.GetDataType()) {
if (0.f == info.GetQuantizationScale()) {
noErrors = false;
std::stringstream ss;
int NumBits() const override { return 8; }
- DataType GetDataType() const override { return DataType::QuantisedAsymm8; }
+ DataType GetDataType() const override { return DataType::QAsymmU8; }
};
struct QSymmS8QuantizationScheme : IQuantizationScheme
int NumBits() const override { return 16; }
- DataType GetDataType() const override { return DataType::QuantisedSymm16; }
+ DataType GetDataType() const override { return DataType::QSymmS16; }
};
} // namespace armnn
std::unique_ptr<IQuantizationScheme> quantizationScheme;
switch (m_Options.m_ActivationFormat)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
quantizationScheme = std::make_unique<QAsymm8QuantizationScheme>();
break;
case DataType::QSymmS8:
quantizationScheme = std::make_unique<QSymmS8QuantizationScheme>();
break;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
quantizationScheme = std::make_unique<QSymm16QuantizationScheme>();
break;
default:
BOOST_ASSERT_MSG(false, "Can't quantize unsupported data type");
}
- TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QuantisedAsymm8, scale, offset);
+ TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QAsymmU8, scale, offset);
return ConstTensor(qInfo, backing);
}
};
template<>
-struct ResolveTypeImpl<DataType::QuantisedAsymm8>
+struct ResolveTypeImpl<DataType::QAsymmU8>
{
using Type = uint8_t;
};
};
template<>
-struct ResolveTypeImpl<DataType::QuantisedSymm16>
+struct ResolveTypeImpl<DataType::QSymmS16>
{
using Type = int16_t;
};
bool TensorInfo::IsQuantized() const
{
- return m_DataType == DataType::QuantisedAsymm8 || m_DataType == DataType::QuantisedSymm16;
+ return m_DataType == DataType::QAsymmU8 || m_DataType == DataType::QSymmS16;
}
// ---
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData);
+ 4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData);
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData);
+ 4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData);
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToInputWeights(
- TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToInputWeightsData);
+ TensorInfo(4, inputToInputWeightsDimensions.data(), DataType::QAsymmU8), inputToInputWeightsData);
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToForgetWeights(
- TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToForgetWeightsData);
+ TensorInfo(4, inputToForgetWeightsDimensions.data(), DataType::QAsymmU8), inputToForgetWeightsData);
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToCellWeights(
- TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToCellWeightsData);
+ TensorInfo(4, inputToCellWeightsDimensions.data(), DataType::QAsymmU8), inputToCellWeightsData);
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> inputToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor inputToOutputWeights(
- TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), inputToOutputWeightsData);
+ TensorInfo(4, inputToOutputWeightsDimensions.data(), DataType::QAsymmU8), inputToOutputWeightsData);
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToInputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToInputWeights(TensorInfo(
- 4, recurrentToInputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToInputWeightsData);
+ 4, recurrentToInputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToInputWeightsData);
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToForgetWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToForgetWeights(TensorInfo(
- 4, recurrentToForgetWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToForgetWeightsData);
+ 4, recurrentToForgetWeightsDimensions.data(), DataType::QAsymmU8), recurrentToForgetWeightsData);
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToCellWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToCellWeights(TensorInfo(
- 4, recurrentToCellWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToCellWeightsData);
+ 4, recurrentToCellWeightsDimensions.data(), DataType::QAsymmU8), recurrentToCellWeightsData);
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
std::vector<unsigned int> recurrentToOutputWeightsDimensions = {1, 1, 3, 3};
ConstTensor recurrentToOutputWeights(TensorInfo(
- 4, recurrentToOutputWeightsDimensions.data(), DataType::QuantisedAsymm8), recurrentToOutputWeightsData);
+ 4, recurrentToOutputWeightsDimensions.data(), DataType::QAsymmU8), recurrentToOutputWeightsData);
std::vector<int32_t> inputGateBiasData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
// Weights and bias tensor and quantization info
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
// Input/output tensor info and quantization info
armnn::TensorInfo inputInfo({numBatches , inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
if (biasEnabled)
{
- constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QuantisedAsymm8) ?
+ constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ?
armnn::DataType::Signed32 : armnn::DataType::Float32;
TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0);
// set the tensors in the network (NHWC format)
TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType);
- if (dataType == armnn::DataType::QuantisedAsymm8)
+ if (dataType == armnn::DataType::QAsymmU8)
{
inputTensorInfo.SetQuantizationOffset(0);
inputTensorInfo.SetQuantizationScale(0.9f);
}
TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType);
- if (dataType == armnn::DataType::QuantisedAsymm8)
+ if (dataType == armnn::DataType::QAsymmU8)
{
outputTensorInfo.SetQuantizationOffset(0);
outputTensorInfo.SetQuantizationScale(0.9f);
BOOST_TEST((infoIn.GetDataType() == armnn::DataType::Float32));
const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
- BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QuantisedAsymm8));
+ BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
}
bool m_Visited = false;
armnn::TensorInfo infoIn({3,1}, armnn::DataType::Float32);
input->GetOutputSlot(0).SetTensorInfo(infoIn);
- armnn::TensorInfo infoOut({3,1}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo infoOut({3,1}, armnn::DataType::QAsymmU8);
quantize->GetOutputSlot(0).SetTensorInfo(infoOut);
Test testQuantize;
BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
{
Graph graph;
- armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QuantisedAsymm8);
- armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QuantisedAsymm8);
+ armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
+ armnn::TensorInfo scoresInfo({1, 10, 4}, DataType::QAsymmU8);
std::vector<uint8_t> anchorsVector(40);
- armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QuantisedAsymm8), anchorsVector);
+ armnn::ConstTensor anchors(armnn::TensorInfo({10, 4}, armnn::DataType::QAsymmU8), anchorsVector);
- armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QuantisedAsymm8);
- armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QuantisedAsymm8);
- armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QuantisedAsymm8);
- armnn::TensorInfo numDetectionInfo({1}, DataType::QuantisedAsymm8);
+ armnn::TensorInfo detectionBoxesInfo({1, 3, 4}, DataType::QAsymmU8);
+ armnn::TensorInfo detectionScoresInfo({1, 3}, DataType::QAsymmU8);
+ armnn::TensorInfo detectionClassesInfo({1, 3}, DataType::QAsymmU8);
+ armnn::TensorInfo numDetectionInfo({1}, DataType::QAsymmU8);
Layer* input0 = graph.AddLayer<InputLayer>(0, "boxEncodings");
input0->GetOutputSlot().SetTensorInfo(boxEncodingsInfo);
{
switch (m_QuantizerOptions.m_ActivationFormat)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
TestQuantizationParamsImpl(
- info, DataType::QuantisedAsymm8, qAsymm8Params.first, qAsymm8Params.second);
+ info, DataType::QAsymmU8, qAsymm8Params.first, qAsymm8Params.second);
break;
case DataType::QSymmS8:
TestQuantizationParamsImpl(
info, DataType::QSymmS8, qSymm8Params.first, qSymm8Params.second);
break;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
TestQuantizationParamsImpl(
- info, DataType::QuantisedSymm16, qSymm16Params.first, qSymm16Params.second);
+ info, DataType::QSymmS16, qSymm16Params.first, qSymm16Params.second);
break;
default:
throw InvalidArgumentException("Unsupported quantization target");
void TestConstantQuantizationParams(const TensorInfo& info,
const OffsetScalePair& params,
- DataType dataType = DataType::QuantisedAsymm8)
+ DataType dataType = DataType::QAsymmU8)
{
boost::ignore_unused(dataType);
- TestQuantizationParamsImpl(info, DataType::QuantisedAsymm8, params.first, params.second);
+ TestQuantizationParamsImpl(info, DataType::QAsymmU8, params.first, params.second);
}
void TestBiasQuantizationParams(const TensorInfo& info,
const OffsetScalePair& qAsymm8Params,
const OffsetScalePair& qSymm8Params,
const OffsetScalePair& qSymm16Params,
- DataType dataType = DataType::QuantisedAsymm8)
+ DataType dataType = DataType::QAsymmU8)
{
switch (m_QuantizerOptions.m_ActivationFormat)
{
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
TestQuantizationParamsImpl(info, dataType, qAsymm8Params.first, qAsymm8Params.second);
break;
case DataType::QSymmS8:
TestQuantizationParamsImpl(info, dataType, qSymm8Params.first, qSymm8Params.second);
break;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
TestQuantizationParamsImpl(info, dataType, qSymm16Params.first, qSymm16Params.second);
break;
default:
TestAdditionQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestAdditionQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestBoundedReluActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestBoundedReluActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestTanHActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestTanHActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestLeakyReLuActivationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestLeakyReLuActivationQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestBatchNormalizationQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions QQsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions QQsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), QQsymm16Options)->ExportNetwork();
TestBatchNormalizationQuantization validatorQSymm16(QQsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestDepthToSpaceQuantization validatorQSymm16(Qsymm16Options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestFullyConnectedQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestFullyConnectedQuantization validatorQSymm16(Qsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestDepthwiseConv2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions Qsymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions Qsymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), Qsymm16Options)->ExportNetwork();
TestDepthwiseConv2dQuantization validatorQSymm16(Qsymm16Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions qSymm16Options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16Options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16Options)->ExportNetwork();
TestInstanceNormalizationQuantization validatorQSymm16(qSymm16Options, tensorShape, tensorShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QuantisedSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestLogSoftmaxQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestSoftmaxQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSoftmaxQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
armnn::UnimplementedException);
// test QuantisedSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
BOOST_CHECK_THROW(INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork(),
armnn::UnimplementedException);
}
TestPermuteQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestPermuteQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestSpaceToBatchQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSpaceToBatchQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestSpaceToDepthQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSpaceToDepthQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestPooling2dQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestPooling2dQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestConstantQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestConstantQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestAbsQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestAbsQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestArgMinMaxQuantization validatorQSymm8(qSymm8Options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestArgMinMaxQuantization validatorQSymm16(qSymm16options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QuantisedSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestComparisonQuantization validatorQSymm16(qSymm16options, tensorShape, tensorShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
concatLayer->GetOutputSlot(0).SetTensorInfo(info);
const QuantizerOptions qSymm8Options(DataType::QSymmS8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkQuantizerPtr quantizerPtrQAsymm8 = INetworkQuantizer::Create(network.get());
INetworkQuantizerPtr quantizerPtrQSymm8 = INetworkQuantizer::Create(network.get(), qSymm8Options);
INetworkQuantizerPtr quantizerPtrQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options);
TestReshapeQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestReshapeQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestSplitterQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSplitterQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestResizeQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestResizeQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestStridedSliceQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestStridedSliceQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestBatchToSpaceQuantization validatorQSymm8(qSymm8Options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestBatchToSpaceQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestPreluQuantization validatorQSymm8(qSymm8Options, inputShape, alphaShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestPreluQuantization validatorQSymm16(qSymm16options, inputShape, alphaShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestTransposeConvolution2dQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
TestStackQuantization validatorQSymm8(qSymm8Options, inputShape, inputShape);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestStackQuantization validatorQSymm16(qSymm16options, inputShape, outputShape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
VisitLayersTopologically(quantizedNetworkQSymm8.get(), validatorQSymm8);
// test QSymm16 quantization
- const QuantizerOptions qSymm16options(DataType::QuantisedSymm16);
+ const QuantizerOptions qSymm16options(DataType::QSymmS16);
INetworkPtr quantizedNetworkQSymm16 = INetworkQuantizer::Create(network.get(), qSymm16options)->ExportNetwork();
TestSliceQuantization validatorQSymm16(qSymm16options, shape, shape);
VisitLayersTopologically(quantizedNetworkQSymm16.get(), validatorQSymm16);
addition->GetOutputSlot(0).SetTensorInfo(info);
QuantizerOptions options = dataType == DataType::Float32 ?
- QuantizerOptions(DataType::QuantisedAsymm8, true) : QuantizerOptions(dataType, true);
+ QuantizerOptions(DataType::QAsymmU8, true) : QuantizerOptions(dataType, true);
INetworkPtr quantizedNetworkQAsymm8 = INetworkQuantizer::Create(network.get(), options)->ExportNetwork();
TestPreserveType validatorQAsymm8(options, dataType, shape, shape);
BOOST_AUTO_TEST_CASE(PreserveTypeQAsymm8)
{
- PreserveTypeTestImpl(DataType::QuantisedAsymm8);
+ PreserveTypeTestImpl(DataType::QAsymmU8);
}
BOOST_AUTO_TEST_CASE(PreserveTypeQsymm8)
BOOST_AUTO_TEST_CASE(PreserveTypeQsymm16)
{
- PreserveTypeTestImpl(DataType::QuantisedSymm16);
+ PreserveTypeTestImpl(DataType::QSymmS16);
}
BOOST_AUTO_TEST_SUITE_END()
input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
armnn::TensorShape({ 1, 5 }),
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
1.0f/255,
0
));
softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(
armnn::TensorShape({ 1, 5 }),
- armnn::DataType::QuantisedAsymm8
+ armnn::DataType::QAsymmU8
));
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
TensorInfo info;
info.SetShape({ 5, 6, 7, 8 });
BOOST_TEST((info.GetShape() == TensorShape({ 5, 6, 7, 8 })));
- info.SetDataType(DataType::QuantisedAsymm8);
- BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
+ info.SetDataType(DataType::QAsymmU8);
+ BOOST_TEST((info.GetDataType() == DataType::QAsymmU8));
info.SetQuantizationScale(10.0f);
BOOST_TEST(info.GetQuantizationScale() == 10.0f);
info.SetQuantizationOffset(5);
BOOST_AUTO_TEST_CASE(DataTypeSize)
{
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4);
- BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QuantisedAsymm8) == 1);
+ BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1);
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4);
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
}
switch (tensorPtr->dataType())
{
case DataType_QuantisedAsymm8:
- type = armnn::DataType::QuantisedAsymm8;
+ case DataType_QAsymmU8:
+ type = armnn::DataType::QAsymmU8;
break;
+ case DataType_QSymmS16:
case DataType_QuantisedSymm16:
- type = armnn::DataType::QuantisedSymm16;
+ type = armnn::DataType::QSymmS16;
break;
case DataType_Signed32:
type = armnn::DataType::Signed32;
BOOST_FIXTURE_TEST_CASE(ActivationReluQuantisedAsymm8, SimpleActivationFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer", {10, 0, 2, 0}}},
{{"OutputLayer", {10, 0, 2, 0}}});
BOOST_FIXTURE_TEST_CASE(ActivationBoundedReluQuantisedAsymm8, SimpleActivationFixture3)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer", {10, 0, 2, 0}}},
{{"OutputLayer", {5, 0, 2, 0}}});
BOOST_FIXTURE_TEST_CASE(AddQuantisedAsymm8, SimpleAddFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer1", { 0, 1, 2, 3 }},
{"InputLayer2", { 4, 5, 6, 7 }}},
DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual, Float32)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual, Float32)
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual, QuantisedAsymm8)
DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual, QuantisedAsymm8)
+ARMNN_NO_DEPRECATE_WARN_END
+
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(Equal, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(Greater, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(GreaterOrEqual, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(Less, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual, QAsymmU8)
+DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual, QAsymmU8)
BOOST_AUTO_TEST_SUITE_END()
BOOST_FIXTURE_TEST_CASE(SimpleConstantAddQuantisedAsymm8, SimpleConstantAddFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ 1, 2, 3, 4, 5, 6 },
{ 2, 4, 6, 8, 10, 12 });
BOOST_FIXTURE_TEST_CASE(DivisionQuantisedAsymm8, SimpleDivisionFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer1", { 0, 5, 24, 21 }},
{"InputLayer2", { 4, 1, 6, 7 }}},
BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer", { 10, 20, 30, 40 }}},
{{"OutputLayer", { 400/2 }}});
BOOST_FIXTURE_TEST_CASE(MultiplicationQuantisedAsymm8, SimpleMultiplicationFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputLayer1", { 0, 1, 2, 3 }},
{"InputLayer2", { 4, 5, 6, 7 }}},
BOOST_FIXTURE_TEST_CASE(SimplePadQuantisedAsymm8, SimplePadFixture)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<3, armnn::DataType::QAsymmU8>(0,
{
0, 4, 2, 5, 6, 1, 5, 2
},
BOOST_FIXTURE_TEST_CASE(SimplePermute2DQuantisedAsymm8, SimplePermute2DFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6 },
{ 1, 4, 2, 5, 3, 6 });
}
BOOST_FIXTURE_TEST_CASE(SimplePermute4DQuantisedAsymm8, SimplePermute4DFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<4, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
{ 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22,
BOOST_FIXTURE_TEST_CASE(Pooling2dQuantisedAsymm8Avg, SimpleAvgPooling2dFixture2)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<4, armnn::DataType::QAsymmU8>(0,
{ 20, 40, 60, 80 },
{ 50 });
}
BOOST_FIXTURE_TEST_CASE(Pooling2dQuantisedAsymm8Max, SimpleMaxPooling2dFixture2)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<4, armnn::DataType::QAsymmU8>(0,
{ 20, 40, 60, 80 },
{ 80 });
}
BOOST_FIXTURE_TEST_CASE(ReshapeQuantisedAsymm8, SimpleReshapeFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 });
}
BOOST_FIXTURE_TEST_CASE(SubtractionQuantisedAsymm8, SimpleSubtractionFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"inputLayer1", { 4, 5, 6, 7 }},
{"inputLayer2", { 3, 2, 1, 0 }}},
armnn::QuantizerOptions quantizerOptions;
quantizerOptions.m_ActivationFormat = cmdline.GetQuantizationScheme() == "QSymm16"
- ? armnn::DataType::QuantisedSymm16
- : armnn::DataType::QuantisedAsymm8;
+ ? armnn::DataType::QSymmS16
+ : armnn::DataType::QAsymmU8;
quantizerOptions.m_PreserveType = cmdline.HasPreservedDataType();
enum DataType : byte {
Float16 = 0,
Float32 = 1,
- QuantisedAsymm8 = 2,
+ QuantisedAsymm8 = 2, // deprecated
Signed32 = 3,
Boolean = 4,
- QuantisedSymm16 = 5
+ QuantisedSymm16 = 5, // deprecated
+ QAsymmU8 = 6,
+ QSymmS16 = 7
}
enum DataLayout : byte {
fbPayload = flatBuffersData.o;
break;
}
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
{
auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
fbPayload = flatBuffersData.o;
break;
}
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
case armnn::DataType::Boolean:
default:
{
case armnn::DataType::Signed32:
return armnnSerializer::ConstTensorData::ConstTensorData_IntData;
case armnn::DataType::Float16:
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
return armnnSerializer::ConstTensorData::ConstTensorData_ShortData;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
case armnn::DataType::Boolean:
return armnnSerializer::ConstTensorData::ConstTensorData_ByteData;
default:
return armnnSerializer::DataType::DataType_Float16;
case armnn::DataType::Signed32:
return armnnSerializer::DataType::DataType_Signed32;
- case armnn::DataType::QuantisedSymm16:
- return armnnSerializer::DataType::DataType_QuantisedSymm16;
- case armnn::DataType::QuantisedAsymm8:
- return armnnSerializer::DataType::DataType_QuantisedAsymm8;
+ case armnn::DataType::QSymmS16:
+ return armnnSerializer::DataType::DataType_QSymmS16;
+ case armnn::DataType::QAsymmU8:
+ return armnnSerializer::DataType::DataType_QAsymmU8;
case armnn::DataType::Boolean:
return armnnSerializer::DataType::DataType_Boolean;
default:
CompareConstTensorData<const float*>(
tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
break;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
case armnn::DataType::Boolean:
CompareConstTensorData<const uint8_t*>(
tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
DECLARE_LAYER_VERIFIER_CLASS(Dequantize)
const std::string layerName("dequantize");
- const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QuantisedAsymm8, 0.5f, 1);
+ const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QAsymmU8, 0.5f, 1);
const armnn::TensorInfo outputInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32);
armnn::INetworkPtr network = armnn::INetwork::Create();
};
const std::string layerName("gather");
- armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QuantisedAsymm8);
- armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QAsymmU8);
+ armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QAsymmU8);
const armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32);
paramsInfo.SetQuantizationScale(1.0f);
armnn::TensorShape inputToInputWeightsShape = {4, 2};
std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
armnn::TensorShape inputToForgetWeightsShape = {4, 2};
std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
armnn::TensorShape inputToCellWeightsShape = {4, 2};
std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
armnn::TensorShape inputToOutputWeightsShape = {4, 2};
std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
armnn::TensorShape recurrentToInputWeightsShape = {4, 4};
std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
// Connect up
armnn::TensorInfo inputTensorInfo({ batchSize, inputSize },
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits },
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize },
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
switch (tensorPtr->type)
{
case tflite::TensorType_UINT8:
- type = armnn::DataType::QuantisedAsymm8;
+ type = armnn::DataType::QAsymmU8;
break;
case tflite::TensorType_FLOAT32:
type = armnn::DataType::Float32;
type = armnn::DataType::QSymmS8;
break;
case tflite::TensorType_INT16:
- type = armnn::DataType::QuantisedSymm16;
+ type = armnn::DataType::QSymmS16;
break;
case tflite::TensorType_INT32:
type = armnn::DataType::Signed32;
tensorPtr,
tensorInfo,
permutationVector);
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
tensorPtr,
tensorInfo,
BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 0, 1, 2, 3 }},
{"inputTensor2", { 4, 5, 6, 7 }}},
BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 });
+ RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 });
}
BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
}
BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNegativeDim, ConcatenationFixtureNegativeDim)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 0, 1, 2, 3 }},
{"inputTensor2", { 4, 5, 6, 7 }}},
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNCHW, ConcatenationFixtureNCHW)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 0, 1, 2, 3 }},
{"inputTensor2", { 4, 5, 6, 7 }}},
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNHWC, ConcatenationFixtureNHWC)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 0, 1, 2, 3 }},
{"inputTensor2", { 4, 5, 6, 7 }}},
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim1, ConcatenationFixtureDim1)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim3, ConcatenationFixtureDim3)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2, 3,
4, 5, 6, 7,
BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim0, ConcatenationFixture3DDim0)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(
+ RunTest<3, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2, 3, 4, 5 } },
{ "inputTensor2", { 6, 7, 8, 9, 10, 11,
BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim1, ConcatenationFixture3DDim1)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(
+ RunTest<3, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2, 3, 4, 5 } },
{ "inputTensor2", { 6, 7, 8, 9, 10, 11,
BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim2, ConcatenationFixture3DDim2)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(
+ RunTest<3, armnn::DataType::QAsymmU8>(
0,
{ { "inputTensor1", { 0, 1, 2,
3, 4, 5 } },
BOOST_FIXTURE_TEST_CASE(SimpleConstantAdd, SimpleConstantAddFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"InputTensor", { 0, 1, 2, 3 }}},
{{"OutputTensor", { 4, 6, 8, 10 }}}
BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2, 3,
BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2,
uint8_t outZero = 20;
uint8_t fz = 4; // filter zero point
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2,
{
uint8_t relu6Min = 6 / 2; // divide by output scale
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2,
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ 0, 1, 2,
3, 4, 5,
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ 0, 1, 2,
3, 4, 5,
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ 0, 1, 2,
3, 4, 5,
BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymm8, SimpleDequantizeFixtureQAsymm8)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8 , armnn::DataType::Float32>(
+ RunTest<2, armnn::DataType::QAsymmU8, armnn::DataType::Float32>(
0,
{{"inputTensor", { 0u, 1u, 5u, 100u, 200u, 255u }}},
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 150.0f, 300.0f, 382.5f }}});
BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQsymm16, SimpleDequantizeFixtureQSymm16)
{
- RunTest<2, armnn::DataType::QuantisedSymm16 , armnn::DataType::Float32>(
+ RunTest<2, armnn::DataType::QSymmS16, armnn::DataType::Float32>(
0,
{{"inputTensor", { 0, 1, 5, 32767, -1, -32768 }}},
{{"outputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f,-49152.0f }}});
{ "num_detections", numDetections}
};
- RunTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float32>(0, input, output);
+ RunTest<armnn::DataType::QAsymmU8, armnn::DataType::Float32>(0, input, output);
}
BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPostProcessCustomOptions)
BOOST_TEST(CheckNumberOfOutputSlot(numDetectionsLayer, 0));
// Check the connections
- armnn::TensorInfo boxEncodingTensor(armnn::TensorShape({ 1, 6, 4 }), armnn::DataType::QuantisedAsymm8, 1, 1);
- armnn::TensorInfo scoresTensor(armnn::TensorShape({ 1, 6, 3 }), armnn::DataType::QuantisedAsymm8,
+ armnn::TensorInfo boxEncodingTensor(armnn::TensorShape({ 1, 6, 4 }), armnn::DataType::QAsymmU8, 1, 1);
+ armnn::TensorInfo scoresTensor(armnn::TensorShape({ 1, 6, 3 }), armnn::DataType::QAsymmU8,
0.00999999978f, 0);
armnn::TensorInfo detectionBoxesTensor(armnn::TensorShape({ 1, 3, 4 }), armnn::DataType::Float32, 0, 0);
BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ 10, 20, 30, 40 },
{ 400/2 });
BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedWithBias, FullyConnectedWithBiasFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ 10, 20, 30, 40 },
{ (400+10)/2 });
BOOST_FIXTURE_TEST_CASE(FullyConnectedWithBiasMultipleOutputs, FullyConnectedWithBiasMultipleOutputsFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ 1, 2, 3, 4, 10, 20, 30, 40 },
{ (40+10)/2, (400+10)/2 });
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 });
+ RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput)
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDims, ReshapeFixtureWithReshapeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlatten, ReshapeFixtureWithReshapeDimsFlatten)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<1, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<1, armnn::DataType::QAsymmU8>(0,
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 },
{ 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenTwoDims, ReshapeFixtureWithReshapeDimsFlattenTwoDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<2, armnn::DataType::QAsymmU8>(0,
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenOneDim, ReshapeFixtureWithReshapeDimsFlattenOneDim)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+ RunTest<3, armnn::DataType::QAsymmU8>(0,
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
{ 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
+ RunTest<2, armnn::DataType::QAsymmU8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitTwoUint8, SimpleSplitFixtureUint8)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16 } } },
BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitTwoUint8, SimpleSplitAxisThreeFixtureUint8)
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16 } } },
BOOST_FIXTURE_TEST_CASE(SimpleSplit2DUint8, SimpleSplit2DFixtureUint8)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8 } } },
{ {"outputTensor1", { 1, 2, 3, 4 } },
BOOST_FIXTURE_TEST_CASE(SimpleSplit3DUint8, SimpleSplit3DFixtureUint8)
{
- RunTest<3, armnn::DataType::QuantisedAsymm8>(
+ RunTest<3, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16 } } },
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<3, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2,1})));
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<2, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2})));
}
BOOST_FIXTURE_TEST_CASE(SimpleSub, SimpleSubFixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor1", { 4, 5, 6, 7 }},
{"inputTensor2", { 3, 2, 1, 0 }}},
BOOST_FIXTURE_TEST_CASE( ParseSimpleTransposeConv, SimpleTransposeConvFixture )
{
- RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ RunTest<4, armnn::DataType::QAsymmU8>(
0,
{
1, 2,
BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecifiedUint8, DefaultUnpackAxisZeroUint8Fixture)
{
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{ {"inputTensor", { 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12,
}
BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSixUint8, DefaultUnpackLastAxisUint8Fixture) {
- RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ RunTest<2, armnn::DataType::QAsymmU8>(
0,
{{"inputTensor", { 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12,
switch (dataType)
{
case DataType::Float32: return "FLOAT32";
- case DataType::QuantisedAsymm8: return "UINT8";
+ case DataType::QAsymmU8: return "UINT8";
default: return "UNKNOWN";
}
}
return arm_compute::DataType::F16;
case armnn::DataType::Float32:
return arm_compute::DataType::F32;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
return arm_compute::DataType::QASYMM8;
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
return arm_compute::DataType::QSYMM16;
case armnn::DataType::QSymmS8:
return arm_compute::DataType::QSYMM8;
{
const armnn::TensorShape tensorShape = { 1, 2, 3, 4 };
- const armnn::DataType dataType = armnn::DataType::QuantisedAsymm8;
+ const armnn::DataType dataType = armnn::DataType::QAsymmU8;
const std::vector<float> quantScales = { 1.5f, 2.5f, 3.5f, 4.5f };
const float quantScale = quantScales[0];
case armnn::DataType::Float16:
case armnn::DataType::Float32:
return weightsType;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
return armnn::DataType::Signed32;
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
return armnn::DataType::Signed32;
default:
BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
return MakeWorkloadForType<Float16Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Float32:
return MakeWorkloadForType<Float32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
return MakeWorkloadForType<Uint8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Signed32:
return MakeWorkloadForType<Int32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Boolean:
return MakeWorkloadForType<BooleanWorkload>::Func(descriptor, info, std::forward<Args>(args)...);
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
return nullptr;
default:
BOOST_ASSERT_MSG(false, "Unknown DataType.");
using Float32Workload = TypedWorkload<QueueDescriptor, armnn::DataType::Float32>;
template <typename QueueDescriptor>
-using Uint8Workload = TypedWorkload<QueueDescriptor, armnn::DataType::QuantisedAsymm8>;
+using Uint8Workload = TypedWorkload<QueueDescriptor, armnn::DataType::QAsymmU8>;
template <typename QueueDescriptor>
using Int32Workload = TypedWorkload<QueueDescriptor, armnn::DataType::Signed32>;
template <typename QueueDescriptor>
using BaseUint8ComparisonWorkload = MultiTypedWorkload<QueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
armnn::DataType::Boolean>;
template <typename QueueDescriptor>
template <typename QueueDescriptor>
using Uint8ToFloat32Workload = MultiTypedWorkload<QueueDescriptor,
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
armnn::DataType::Float32>;
} //namespace armnn
return DataType::Float16;
case DataType::Float32:
return DataType::Float32;
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
return DataType::Signed32;
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
return DataType::Signed32;
default:
BOOST_ASSERT_MSG(false, "Invalid input data type");
const std::string& descName)
{
const DataType inputType = inputInfo.GetDataType();
- if (inputType == DataType::QuantisedAsymm8)
+ if (inputType == DataType::QAsymmU8)
{
const std::vector<DataType> validTypes =
{
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QuantizedSymm8PerAxis
};
const DataType outputDataType = outputInfo.GetDataType();
const bool canHavePerAxisQuantization =
- inputDataType == DataType::QuantisedAsymm8 && inputDataType == outputDataType;
+ inputDataType == DataType::QAsymmU8 && inputDataType == outputDataType;
if (!canHavePerAxisQuantization)
{
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Signed32
};
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
DataType::Float16,
DataType::Boolean,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
DataType::Float16,
DataType::Boolean,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
DataType::Float16,
DataType::Boolean,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
DataType::Float32,
DataType::Float16,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
DataType::Float32,
DataType::Float16,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
// check for supported type of one input and match them with all the other input and output
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Float16
};
DataType::Float16,
DataType::Float32,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
// First check if input tensor data type is supported, then
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
- if (outputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
+ if (outputTensorInfo.GetDataType() != DataType::QAsymmU8 &&
outputTensorInfo.GetDataType() != DataType::QSymmS8 &&
- outputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
+ outputTensorInfo.GetDataType() != DataType::QSymmS16)
{
throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
}
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
DataType::Float16,
DataType::Float32,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
std::vector<DataType> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
std::vector<DataType> inputOutputSupportedTypes =
{
- DataType::QuantisedAsymm8
+ DataType::QAsymmU8
};
std::vector<DataType> cellStateSupportedTypes =
{
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
std::vector<DataType> weightsSupportedTypes =
{
- DataType::QuantisedAsymm8
+ DataType::QAsymmU8
};
std::vector<DataType> biasSupportedTypes =
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputInfo, supportedTypes, descriptorName);
biasInfoPtr = &dummyFloat32Bias;
break;
}
- case DataType::QuantisedAsymm8:
- case DataType::QuantisedSymm16:
+ case DataType::QAsymmU8:
+ case DataType::QSymmS16:
{
biasInfoPtr = &dummyQA8Bias;
break;
weightPermuted =
ReorderWeightChannelsForAcl<half_float::half>(weightPermuted, dataLayout, permuteBuffer);
break;
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
weightPermuted = ReorderWeightChannelsForAcl<uint8_t>(weightPermuted, dataLayout, permuteBuffer);
break;
case DataType::QuantizedSymm8PerAxis:
inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
{
- TensorInfo commonTensorInfo({ 2, 3 }, DataType::QuantisedAsymm8);
+ TensorInfo commonTensorInfo({ 2, 3 }, DataType::QAsymmU8);
const float scale = 0.023529f;
const int8_t offset = -43;
m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
- armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
+ armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QAsymmU8));
m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
// set the tensors in the network
- TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
inputTensorInfo.SetQuantizationOffset(100);
inputTensorInfo.SetQuantizationScale(10000.0f);
input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
outputTensorInfo.SetQuantizationOffset(0);
outputTensorInfo.SetQuantizationScale(1.0f / 256.0f);
softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
float outputQScale = 2.0f;
layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
- DataType::QuantisedAsymm8, inputsQScale, 0));
+ DataType::QAsymmU8, inputsQScale, 0));
layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
- GetBiasDataType(DataType::QuantisedAsymm8), inputsQScale));
+ GetBiasDataType(DataType::QAsymmU8), inputsQScale));
layer->m_Weight->Allocate();
layer->m_Bias->Allocate();
Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
// connect up
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QuantisedAsymm8, inputsQScale));
- Connect(layer, output, TensorInfo({3, 7}, DataType::QuantisedAsymm8, outputQScale));
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QAsymmU8, inputsQScale));
+ Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
// check the constants that they are not NULL
BOOST_CHECK(layer->m_Weight != nullptr);
// Creates a subgraph containing only a single unsupported layer (only convolutions are unsupported by the mock backend)
SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
Pooling2dDescriptor poolingDescriptor;
poolingDescriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
// Creates a subgraph containing only unsupported layers (only convolutions are unsupported by the mock backend)
SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
Pooling2dDescriptor poolingDescriptor;
poolingDescriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
// Creates a simple subgraph with only one convolution layer, supported by the mock backend
SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
// Creates a subgraph with five convolutions layers, all supported by the mock backend
SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
// (only convolutions are unsupported by the mock backend)
SubgraphView::SubgraphViewPtr BuildPartiallySupportedSubgraph(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
// Creates a subgraph with only unoptimizable layers ("unoptimizable" is added to the layer's name)
SubgraphView::SubgraphViewPtr BuildFullyUnoptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
// Creates a subgraph with some unoptimizable layers ("unoptimizable" is added to the layer's name)
SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
// this is meant to test input slots coming from different layers
SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph)
{
- const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0);
- const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0);
+ const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QAsymmU8, 1.0f, 0);
+ const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QAsymmU8, 0.9f, 0);
const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0);
Convolution2dDescriptor convolutionDescriptor;
int32_t cellStateOffset = 0;
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::IConnectableLayer* const outputStateOut = net->AddOutputLayer(1);
armnn::TensorInfo inputTensorInfo({batchSize , inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateInTensorInfo({batchSize , outputSize},
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateOutTensorInfo({batchSize, outputSize},
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputTensorInfo({batchSize, outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
void QuantizedLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
{
std::vector<uint8_t> inputVector = {166, 179, 50, 150};
- armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, inputVector);
std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036};
- armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QuantisedSymm16);
+ armnn::TensorInfo cellStateInDesc({2, 4}, armnn::DataType::QSymmS16);
boost::multi_array<int16_t, 2> cellStateIn = MakeTensor<int16_t, 2>(cellStateInDesc, cellStateInVector);
std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112};
- armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo outputStateInDesc({2, 4}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> outputStateIn = MakeTensor<uint8_t, 2>(outputStateInDesc, outputStateInVector);
std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235};
- armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QuantisedSymm16);
+ armnn::TensorInfo cellStateOutVectorDesc({2, 4}, armnn::DataType::QSymmS16);
boost::multi_array<int16_t, 2> cellStateOut = MakeTensor<int16_t, 2>(cellStateOutVectorDesc, cellStateOutVector);
std::vector<uint8_t> outputStateOutVector = {140, 151, 146, 112, 136, 156, 142, 112};
- armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> outputStateOut = MakeTensor<uint8_t, 2>(outputDesc, outputStateOutVector);
// Builds up the structure of the network
const TensorShape weightShape{ cOutput, cInput, hInput, wInput };
const TensorShape biasShape { cOutput };
- constexpr DataType inputType = DataType::QuantisedAsymm8;
+ constexpr DataType inputType = DataType::QAsymmU8;
constexpr DataType weightType = DataType::QuantizedSymm8PerAxis;
constexpr DataType biasType = DataType::Signed32;
case armnn::DataType::Float16:
case armnn::DataType::Float32:
return weightsType;
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
return armnn::DataType::Signed32;
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
return armnn::DataType::Signed32;
default:
BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
float qScale = 0.0625f;
int32_t qOffset = 64;
- if (ArmnnType == armnn::DataType::QuantisedSymm16)
+ if (ArmnnType == armnn::DataType::QSymmS16)
{
qScale = 0.1f;
qOffset = 0;
float qScale = 0.0625f;
int32_t qOffset = 64;
- if (ArmnnType == armnn::DataType::QuantisedSymm16)
+ if (ArmnnType == armnn::DataType::QSymmS16)
{
qScale = 0.1f;
qOffset = 0;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
-Abs2dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Abs2dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Abs2dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Abs2dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3>
-Abs3dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Abs3dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Abs3dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Abs3dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
float outputScale = 6.0f / 255.0f;
int32_t outputOffset = 0;
- return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 6.0f, 0.0f,
inputScale, inputOffset, outputScale, outputOffset,
input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
int32_t inputOffset = 112;
float inputScale = 0.0125f;
- return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 1.0f, -1.0f,
inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 4.0f, 3);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedSymm16>(
+ return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, 0.1f, 0);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
+ return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 50);
}
LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SimpleSigmoidTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 0);
+ return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 0);
}
LayerTestResult<float, 4> ReLuTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SoftReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> SoftReLuInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SoftReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LeakyReLuTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LeakyReLuTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AbsTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> AbsInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AbsTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
LayerTestResult<float, 5> SqrtNNTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SqrtTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> SqrtInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SqrtTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SquareTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.0625f, 64);
+ return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}
LayerTestResult<int16_t, 4> SquareInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SquareTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return TanhTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 64);
+ return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
}
LayerTestResult<int16_t, 4> TanhInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return TanhTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 0.1f, 0);
+ return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::ActivationFunction f)
{
- return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
}
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::ActivationFunction f)
{
- return CompareActivationTestImpl<armnn::DataType::QuantisedSymm16>(
+ return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 2.f, 0);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
+ return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, 2.f, 0);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.1333333f, 128);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, 0.1333333f, 0);
}
255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
});
- return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
};
- return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxSimpleTest<armnn::DataType::QuantisedAsymm8>(
+ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxSimpleTest<armnn::DataType::QuantisedSymm16>(
+ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinSimpleTest<armnn::DataType::QuantisedAsymm8>(
+ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinSimpleTest<armnn::DataType::QuantisedSymm16>(
+ArgMinSimpleTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinChannelTest<armnn::DataType::QuantisedAsymm8>(
+ArgMinChannelTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinChannelTest<armnn::DataType::QuantisedSymm16>(
+ArgMinChannelTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxChannelTest<armnn::DataType::QuantisedAsymm8>(
+ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxChannelTest<armnn::DataType::QuantisedSymm16>(
+ArgMaxChannelTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMaxHeightTest<armnn::DataType::QuantisedAsymm8>(
+ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
template LayerTestResult<int32_t, 3>
-ArgMinWidthTest<armnn::DataType::QuantisedAsymm8>(
+ArgMinWidthTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
2.f, 4.f
};
- return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
inputOutputShape,
6.f, 4.f
};
- return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
inputOutputShape, inputValues, expectedOutputValues,
2.f, 4.f
};
- return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
+ return BatchNormTestImpl<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
inputOutputShape,
6.f, 4.f
};
- return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
+ return BatchNormTestImpl<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
inputOutputShape,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_SimpleTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1ElementTestData,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ComparisonTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return ComparisonTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
s_Broadcast1dVectorTestData,
// Explicit template specializations
//
-template LayerTestResult<ResolveType<DataType::QuantisedAsymm8>, 3>
-ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>(
+template LayerTestResult<ResolveType<DataType::QAsymmU8>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor);
-template LayerTestResult<ResolveType<DataType::QuantisedSymm16>, 3>
-ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>(
+template LayerTestResult<ResolveType<DataType::QSymmS16>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor);
unsigned int inputChannels2 = 1;
// Defines the tensor descriptors.
- TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
- TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
- TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
+ TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
+ TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
+ TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
// Quantized input1 tensor. Range [-3, 1]
const float inputScale1 = 0.015686f;
unsigned int inputChannels2 = 1;
// Defines the tensor descriptors.
- TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
- TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
- TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
+ TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
+ TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
+ TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
// Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
const float scale = 0.13497836f;
unsigned int inputChannels2 = 1;
// Defines the tensor descriptors.
- TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedSymm16);
- TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedSymm16);
- TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedSymm16);
+ TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
+ TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
+ TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
// Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
const float scale = 0.13497836f;
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat1dTestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat2dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat2dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat2dDim0DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+ return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat2dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+ return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat3dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concat3dDim2TestImpl<DataType::QuantisedAsymm8>(
+ return Concat3dDim2TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat3dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+ return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concat3dDim2DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
+ return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDim2TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
{
- return Concat4dDim3TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDim3TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDiffShapeDim0TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDiffShapeDim1TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concat4dDiffShapeDim2TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1);
}
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concat4dDiffShapeDim3TestImpl<DataType::QuantisedAsymm8>(
+ return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+ return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
+ return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 2e-6f, 1);
}
LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
+ return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 2e-6f, 1);
}
int32_t qOffset;
switch (ArmnnType)
{
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
{
qScale = 0.1f;
qOffset = 128;
break;
}
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
{
qScale = 0.1f;
qOffset = 0;
int32_t qOffset;
switch (ArmnnType)
{
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
{
qScale = 0.1f;
qOffset = 128;
break;
}
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
{
qScale = 0.1f;
qOffset = 0;
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Convolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
bool biasEnabled,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
armnn::DataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
bool,
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
}
{
using namespace armnn;
- const DataType inputType = DataType::QuantisedAsymm8;
+ const DataType inputType = DataType::QAsymmU8;
const DataType kernelType = DataType::QuantizedSymm8PerAxis;
const DataType biasType = DataType::Signed32;
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
{
using namespace armnn;
- const DataType inputType = DataType::QuantisedAsymm8;
+ const DataType inputType = DataType::QAsymmU8;
const DataType kernelType = DataType::QuantizedSymm8PerAxis;
const DataType biasType = DataType::Signed32;
armnn::IWorkloadFactory& refWorkloadFactory,
const armnn::DataLayout layout)
{
- return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, refWorkloadFactory, layout);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Debug4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> Debug3dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Debug3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> Debug2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Debug2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 1> Debug1dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Debug1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> Debug4dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Debug4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 3> Debug3dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Debug3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> Debug2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Debug2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 1> Debug1dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Debug1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
armnn::DataLayout dataLayout);
-// QuantisedAsymm8
+// QAsymmU8
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest1<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest1<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest2<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest2<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest3<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest3<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-DepthToSpaceTest4<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+DepthToSpaceTest4<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-// QuantisedSymm16
+// QSymmS16
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest1<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest1<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest2<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest2<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest3<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest3<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-DepthToSpaceTest4<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+DepthToSpaceTest4<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::DataLayout dataLayout);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return DequantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> DequantizeOffsetUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return DequantizeOffsetTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> DequantizeSimpleInt8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return DequantizeSimpleTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<armnn::Half, 4> DequantizeSimpleUint8ToFp16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float16>(workloadFactory,
+ return DequantizeSimpleTest<armnn::DataType::QAsymmU8, armnn::DataType::Float16>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Float16>(workloadFactory,
+ return DequantizeSimpleTest<armnn::DataType::QSymmS16, armnn::DataType::Float16>(workloadFactory,
memoryManager);
}
4, 4, 4, 4, 5, 5, 5, 5
};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape,
std::vector<uint8_t> output = { 1, 2, 3, 4, 5, 6, 7, 8};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
13, 14, 15, 16, 17, 18
};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
4, 4, 4, 4, 5, 5, 5, 5
};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
std::vector<int16_t> output = { 1, 2, 3, 4, 5, 6, 7, 8};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
13, 14, 15, 16, 17, 18
};
- return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleFloorTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleFloorTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
// Explicit template specializations
//
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
-FullyConnectedTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+FullyConnectedTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-FullyConnectedTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+FullyConnectedTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+ return GatherTestHelper<armnn::DataType::QAsymmU8>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 1> Gather1dParamsInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedSymm16>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+ return GatherTestHelper<armnn::DataType::QSymmS16>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> GatherMultiDimParamsFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::GatherMultiDimParamsTestImpl(
+ return GatherTestHelper<armnn::DataType::QAsymmU8>::GatherMultiDimParamsTestImpl(
workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedSymm16>::GatherMultiDimParamsTestImpl(
+ return GatherTestHelper<armnn::DataType::QSymmS16>::GatherMultiDimParamsTestImpl(
workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedAsymm8>::GatherMultiDimParamsMultiDimIndicesTestImpl(
+ return GatherTestHelper<armnn::DataType::QAsymmU8>::GatherMultiDimParamsMultiDimIndicesTestImpl(
workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return GatherTestHelper<armnn::DataType::QuantisedSymm16>::GatherMultiDimParamsMultiDimIndicesTestImpl(
+ return GatherTestHelper<armnn::DataType::QSymmS16>::GatherMultiDimParamsMultiDimIndicesTestImpl(
workloadFactory, memoryManager);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
1.f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
1.f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
1.f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
1.f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
1.f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
1.f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
1.f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
1.f,
// Input/Output tensor info
armnn::TensorInfo inputInfo({numBatches , inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
armnn::TensorInfo cellStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedSymm16,
+ armnn::DataType::QSymmS16,
cellStateScale,
cellStateOffset);
armnn::TensorInfo outputStateInfo({numBatches , outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
inputOutputScale,
inputOutputOffset);
// Weights and bias tensor and quantization info
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
- armnn::DataType::QuantisedAsymm8,
+ armnn::DataType::QAsymmU8,
weightsScale,
weightsOffset);
const float qScale = 1.0f;
const int32_t qOffset = 0;
- const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
- const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
+ const armnn::DataType datatype = armnn::DataType::QSymmS16;
+ const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
armnn::TensorInfo inputDesc({2, 2}, datatype);
boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(
const float qScale = 1.0f;
const int32_t qOffset = 0;
- const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
- const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
+ const armnn::DataType datatype = armnn::DataType::QSymmS16;
+ const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
boost::multi_array<int16_t, 2> input =
const float qScale = 2.0f;
const int32_t qOffset = 0;
- const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
- const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
+ const armnn::DataType datatype = armnn::DataType::QSymmS16;
+ const armnn::DataType constantDatatype = armnn::DataType::QAsymmU8;
armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
boost::multi_array<int16_t, 2> input =
const float qScale = 1.0f;
const int32_t qOffset = 0;
- const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
+ const armnn::DataType datatype = armnn::DataType::QSymmS16; // datatype & constants set to QSymmS16
armnn::TensorInfo inputDesc({2, 2}, datatype);
boost::multi_array<int16_t , 2> input =
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
{166, 179, 50, 150}));
- armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
+ armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QAsymmU8);
boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
{140, 151, 146, 112, 136, 156, 142, 112 }));
4, 4, 4, 4, 5, 5, 5, 5
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape,
7, 8, 9, 10, 11, 12
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
7, 10, 9, 10, 11, 12
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
4, 4, 4, 4, 5, 5, 5, 5 });
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
7, 8, 9, 10, 11, 12
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
7, 10, 9, 10, 11, 12
};
- return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
1, 1, 2, 1, 2, 3
};
- return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
3, 3, 3, 3, 4, 4, 4, 4
};
- return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
2, 2, 2, 2, 2, 2
};
- return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
1, 8, 3, 1, 10, 3
};
- return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
};
// Scale/offset chosen to have output values out of range
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape,
14, 16, 18, 20, 22, 24
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
7, 16, 27, 10, 22, 36
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
84, 104, 126, 150, 176, 204
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
14, 16, 18, 20, 22, 24
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
7, 16, 27, 10, 22, 36
};
- return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
// Explicit template specializations
//
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Pad2dTestCommon<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset,
const float customPaddingValue);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Pad3dTestCommon<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+Pad4dTestCommon<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
+ return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
}
LayerTestResult<uint8_t, 3> PadUint83dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad3dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> PadUint84dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad4dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 2> PadFloat322dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, forceNoPadding);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, forceNoPadding);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 1.0f, -5);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 1.0f, -5);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, dataLayout, 0.5, -1);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, dataLayout);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, 0.5, -1);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize7Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize9Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> ComparePooling2dTest(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
+ return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleReshapeTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleReshapeTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleReshapeTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 5>
-Reshape5dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 5>
+Reshape5dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 5>
-Reshape5dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 5>
+Reshape5dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
int32_t outQuantOffset);
// QAsymm8
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearNopTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearMinTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeBilinearMagTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout,
int32_t outQuantOffset);
// QSymm16
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearNopTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearNopTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleResizeBilinearTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearSqMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearMinTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeBilinearMagTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeBilinearMagTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborNopTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleResizeNearestNeighborTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborSqMinTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborMinTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborMinTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-ResizeNearestNeighborMagTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+ResizeNearestNeighborMagTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 2>
-Rsqrt2dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Rsqrt2dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Rsqrt2dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Rsqrt2dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3>
-Rsqrt3dTest<armnn::DataType::QuantisedAsymm8>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Rsqrt3dTest<armnn::DataType::QAsymmU8>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Rsqrt3dTest<armnn::DataType::QuantisedSymm16>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Rsqrt3dTest<armnn::DataType::QSymmS16>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
LayerTestResult<uint8_t, 4> Slice4dUint8Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Slice4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> Slice3dUint8Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Slice3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> Slice2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Slice2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 1> Slice1dUint8Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice1dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return Slice1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
// Int16 tests
LayerTestResult<int16_t, 4> Slice4dInt16Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Slice4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 3> Slice3dInt16Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Slice3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> Slice2dInt16Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Slice2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 1> Slice1dInt16Test(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Slice1dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return Slice1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta);
}
LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
float beta)
{
Simple3dSoftmaxOutputData data;
- return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return Simple3dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
beta,
{
Simple4dSoftmaxData data;
- return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
+ return Simple4dSoftmaxTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, beta,
data.inputShape, data.outputData, data.inputData);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta);
}
LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
float beta)
{
Simple3dSoftmaxOutputData data;
- return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
+ return Simple3dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
data.inputShape, data.outputData, data.inputData);
}
{
Simple4dSoftmaxData data;
- return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
+ return Simple4dSoftmaxTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, beta,
data.inputShape, data.outputData, data.inputData);
}
armnn::IWorkloadFactory& refWorkloadFactory,
float beta)
{
- return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
+ return CompareSoftmaxTestImpl<armnn::DataType::QAsymmU8>(
workloadFactory, memoryManager, refWorkloadFactory, beta);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdSimpleNhwcFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNhwcUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNhwcUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNhwcUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNhwcUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNhwcUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNhwcUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNhwcUint16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNhwcTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
+ return SpaceToDepthSimpleTest1<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
+ return SpaceToDepthSimpleTest1<armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
armnn::DataLayout::NCHW);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
+ return SpaceToDepthSimpleTest2<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
+ return SpaceToDepthSimpleTest2<armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
armnn::DataLayout::NCHW);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+ return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 3> CopyViaSplitterFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSlice4dReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4dReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice4dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0Dim3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0Dim3Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition1Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition1Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition2Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition2Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition3Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0And1Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0And1Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0And2Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0And2Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0And3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0And3Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskBitPosition0And1And3Test<armnn::DataType::QuantisedAsymm8>(workloadFactory,
+ return StridedSliceShrinkAxisMaskBitPosition0And1And3Test<armnn::DataType::QAsymmU8>(workloadFactory,
memoryManager);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> StridedSlice3dReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3dReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice3dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2dReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2dReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+ return StridedSlice2dReverseTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> StridedSlice4dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> StridedSlice4dReverseInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4dReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice4dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 3> StridedSlice3dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 3> StridedSlice3dReverseInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3dReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice3dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> StridedSlice2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
LayerTestResult<int16_t, 2> StridedSlice2dReverseInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2dReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+ return StridedSlice2dReverseTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
std::vector<uint8_t> input1 = { 1, 2, 1, 2 };
std::vector<uint8_t> output = { 3, 3, 5, 5 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
std::vector<uint8_t> output = { 5, 6, 7, 8 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
std::vector<uint8_t> output = { 8, 11, 12, 15 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QAsymmU8>(
workloadFactory,
memoryManager,
shape0,
std::vector<int16_t> input1 = { 1, 2, 1, 2 };
std::vector<int16_t> output = { 3, 3, 5, 5 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape,
std::vector<int16_t> output = { 3, 4, 5, 6 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
std::vector<int16_t> output = { 8, 11, 12, 15 };
- return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+ return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QSymmS16>(
workloadFactory,
memoryManager,
shape0,
{
using namespace armnn;
- const DataType inputType = DataType::QuantisedAsymm8;
+ const DataType inputType = DataType::QAsymmU8;
const DataType kernelType = DataType::QuantizedSymm8PerAxis;
const DataType biasType = DataType::Signed32;
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-SimpleTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+SimpleTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-SimpleTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+SimpleTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-PaddedTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+PaddedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-PaddedTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+PaddedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-StridedTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+StridedTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-StridedTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+StridedTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
-MultiChannelTransposeConvolution2dTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+MultiChannelTransposeConvolution2dTest<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout);
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-MultiChannelTransposeConvolution2dTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+MultiChannelTransposeConvolution2dTest<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout);
const ArgMinMaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- if (input.GetDataType() == DataType::QuantisedAsymm8)
+ if (input.GetDataType() == DataType::QAsymmU8)
{
return false;
}
ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
{
- ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QuantisedAsymm8>();
+ ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QAsymmU8>();
}
template <typename FullyConnectedWorkloadType, typename armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
{
- ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QuantisedAsymm8);
+ ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
}
template <typename armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
- ClCreateReshapeWorkloadTest<armnn::DataType::QuantisedAsymm8>();
+ ClCreateReshapeWorkloadTest<armnn::DataType::QAsymmU8>();
}
template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
{
- ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
{
- ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
template <typename MeanWorkloadType, typename armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateMeanUint8Workload)
{
- ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QuantisedAsymm8>();
+ ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QAsymmU8>();
}
template <typename ConcatWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
template <typename SpaceToDepthWorkloadType, typename armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
{
- ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+ ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
{
- ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+ ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
template <armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
- ClCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+ ClCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
template <typename QuantizedLstmWorkloadType>
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Uint8Test)
{
- ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test)
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Uint8Test)
{
- ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test)
BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Uint8Test)
{
- ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
// DepthToSpace
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// Dequantize
BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
{
- DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
{
- DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest)
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
BOOST_AUTO_TEST_CASE(ClPreluEndToEndTestUint8)
{
- PreluEndToEndPositiveTest<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSpaceToDepthNhwcEndToEndTest1)
BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndUint8Test)
{
- Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndTest)
BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndUint8Test)
{
- Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter2dDim1EndToEndUint8Test)
{
- Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndTest)
BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndUint8Test)
{
- Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter3dDim1EndToEndUint8Test)
{
- Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter3dDim2EndToEndUint8Test)
{
- Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndTest)
BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndUint8Test)
{
- Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter4dDim1EndToEndUint8Test)
{
- Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter4dDim2EndToEndUint8Test)
{
- Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClSplitter4dDim3EndToEndUint8Test)
{
- Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
// TransposeConvolution2d
BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NchwTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NhwcTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NHWC);
}
{
armnn::ClWorkloadFactory factory =
ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
- IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+ IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}
BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedCl, ClContextControlFixture)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat322, BatchToSpaceNdNchwTest2<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
// Fully Connected
ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true)
ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
- ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>, false)
+ ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
// Normalization
ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
// Floor
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>)
// Pad
ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
// PReLU
ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QAsymmU8>)
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
// Lstm
ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
// Minimum
ARMNN_AUTO_TEST_CASE(MinimumBroadcast1Element1, MinimumBroadcast1ElementTest1)
SimpleResizeBilinearTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
ResizeBilinearNopTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
ResizeBilinearSqMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
ResizeBilinearMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
// Resize Bilinear - NHWC
ResizeBilinearNopTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
SimpleResizeBilinearTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
ResizeBilinearSqMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
ResizeBilinearMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
// Resize NearestNeighbor - NCHW
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
// Resize NearestNeighbor - NHWC
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
// Rsqrt
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nhwc,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NHWC)
// Abs
}
template class ClGreaterWorkload<DataType::Float32>;
-template class ClGreaterWorkload<DataType::QuantisedAsymm8>;
+template class ClGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn
};
using ClGreaterFloat32Workload = ClGreaterWorkload<DataType::Float32>;
-using ClGreaterUint8Workload = ClGreaterWorkload<DataType::QuantisedAsymm8>;
+using ClGreaterUint8Workload = ClGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn
case DataType::Float32:
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<float>());
break;
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<uint8_t>());
break;
case DataType::QuantizedSymm8PerAxis:
NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
SubtractionQueueDescriptor,
SubtractionLayer,
- DataType::QuantisedAsymm8>();
+ DataType::QAsymmU8>();
}
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
- DataType::QuantisedAsymm8>();
+ DataType::QAsymmU8>();
}
template <typename WorkloadType,
BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload)
{
- NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
- NeonCreatePooling2dWorkloadTest<DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NHWC);
}
static void NeonCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
{
- NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QuantisedAsymm8);
+ NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
}
template <typename armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
- NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
+ NeonCreateReshapeWorkloadTest<DataType::QAsymmU8>();
}
template <typename ResizeWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
{
- NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
{
- NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
{
- NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+ NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
{
- NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+ NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
template <armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
- NeonCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+ NeonCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
template <typename QuantizedLstmWorkloadType>
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
{
- ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
{
- ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
{
- ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
// DepthToSpace
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// Dequantize
BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
{
- DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
{
- DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
BOOST_AUTO_TEST_CASE(NeonPreluEndToEndTestUint8Test)
{
- PreluEndToEndPositiveTest<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest1)
BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndUint8Test)
{
- Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndTest)
BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndUint8Test)
{
- Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndUint8Test)
{
- Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndTest)
BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndUint8Test)
{
- Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndUint8Test)
{
- Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndUint8Test)
{
- Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndTest)
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndUint8Test)
{
- Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndUint8Test)
{
- Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndUint8Test)
{
- Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndUint8Test)
{
- Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonQuantizedLstmEndToEndTest)
BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NchwTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NhwcTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTestQuantisedAsymm8)
{
- ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTestQuantisedAsymm8)
{
- ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0TestQuantisedAsymm8)
{
- ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinAxis0TestQuantisedAsymm8)
{
- ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1TestQuantisedAsymm8)
{
- ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinAxis1TestQuantisedAsymm8)
{
- ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2TestQuantisedAsymm8)
{
- ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinAxis2TestQuantisedAsymm8)
{
- ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3TestQuantisedAsymm8)
{
- ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
{
- ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest)
QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
QuantizeData(qScores.data(), scores.data(), scoresInfo);
QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+ DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
qScores, qAnchors,
1.0f, 1, 0.01f, 0, 0.5f, 0);
}
QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
QuantizeData(qScores.data(), scores.data(), scoresInfo);
QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+ DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
qScores, qAnchors,
1.0f, 1, 0.01f, 0, 0.5f, 0);
}
{
armnn::NeonWorkloadFactory factory =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
- IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+ IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}
BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedNeon)
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
- <DataType::QuantisedAsymm8, DataType::Signed32>,
+ <DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test
- <DataType::QuantisedAsymm8, DataType::Signed32>,
+ <DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4,
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QuantisedAsymm8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QuantisedAsymm8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QuantisedSymm16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QuantisedSymm16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
// Depthwise Convolution
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
- ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>, false)
+ ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
// Fully Connected
ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true)
// Add
ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<armnn::DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeTest<armnn::DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<armnn::DataType::Float32>)
// Pad
ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
// Lstm
ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
// Max
ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest<DataType::Float32>, DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
- ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NCHW)
// Resize Bilinear - NHWC data layout
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
- ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NHWC)
// Resize NearestNeighbor - NCHW
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
// Resize NearestNeighbor - NHWC
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
// Slice
// PReLU
ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QAsymmU8>)
// Stack
ARMNN_AUTO_TEST_CASE(Stack0Axis, StackAxis0Float32Test)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
- SimpleTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
- PaddedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
- StridedTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
true,
DataLayout::NHWC)
MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nhwc,
- MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
+ MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
DataLayout::NHWC)
// Abs
ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QAsymmU8>)
#if defined(ARMNNREF_ENABLED)
m_FullyConnectedLayer.reset(layer.release());
// Allocate
- if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QuantisedAsymm8)
+ if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QAsymmU8)
{
InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
}
}
template class NeonGreaterWorkload<DataType::Float32>;
-template class NeonGreaterWorkload<DataType::QuantisedAsymm8>;
+template class NeonGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn
\ No newline at end of file
};
using NeonGreaterFloat32Workload = NeonGreaterWorkload<DataType::Float32>;
-using NeonGreaterUint8Workload = NeonGreaterWorkload<DataType::QuantisedAsymm8>;
+using NeonGreaterUint8Workload = NeonGreaterWorkload<DataType::QAsymmU8>;
} //namespace armnn
\ No newline at end of file
case DataType::Float32:
CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
break;
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
break;
case DataType::QuantizedSymm8PerAxis:
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
std::array<DataType, 4> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Signed32
};
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
bool supported = true;
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
bool supported = true;
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
"Reference convolution2d: input and output types mismatched.");
const DataType inputType = input.GetDataType();
- if (inputType == DataType::QuantisedAsymm8)
+ if (inputType == DataType::QAsymmU8)
{
std::array<DataType, 2> supportedWeightTypes =
{
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QuantizedSymm8PerAxis
};
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
"Reference DepthwiseConvolution2d: input and output types mismatched.");
const DataType inputType = input.GetDataType();
- if (inputType == DataType::QuantisedAsymm8)
+ if (inputType == DataType::QAsymmU8)
{
std::array<DataType, 2> supportedWeightTypes =
{
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QuantizedSymm8PerAxis
};
bool supported = true;
std::array<DataType,3> supportedInputTypes = {
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
std::array<DataType,3> supportedInputTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
bool supported = true;
std::array<DataType,2> supportedTypes = {
DataType::Float32,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
// check inputs and outputs
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16,
DataType::Boolean
};
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
{
DataType::Float16,
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
bool supported = true;
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
std::array<DataType,3> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
// Define supported output types.
std::array<DataType,3> supportedOutputTypes = {
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QSymmS8,
- DataType::QuantisedSymm16
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
"Reference quantize: output type not supported.");
DataType::Float32,
DataType::Float16,
DataType::Signed32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
"Reference reshape: input type not supported.");
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
std::array<DataType, 3> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
std::array<DataType,3> supportedTypes =
{
DataType::Float32,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
std::array<DataType,4> supportedTypes = {
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
DataType::Float32,
DataType::Float16,
- DataType::QuantisedAsymm8,
- DataType::QuantisedSymm16
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
const DataType inputType = input.GetDataType();
- if (inputType == DataType::QuantisedAsymm8)
+ if (inputType == DataType::QAsymmU8)
{
std::array<DataType, 2> supportedWeightTypes =
{
- DataType::QuantisedAsymm8,
+ DataType::QAsymmU8,
DataType::QuantizedSymm8PerAxis
};
bool IsQSymm16(const WorkloadInfo& info)
{
- return IsDataType<DataType::QuantisedSymm16>(info);
+ return IsDataType<DataType::QSymmS16>(info);
}
RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
- RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
}
template <typename WorkloadType,
RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
AdditionQueueDescriptor,
AdditionLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
AdditionQueueDescriptor,
AdditionLayer,
- armnn::DataType::QuantisedSymm16>();
+ armnn::DataType::QSymmS16>();
}
BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
SubtractionQueueDescriptor,
SubtractionLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
SubtractionQueueDescriptor,
SubtractionLayer,
- armnn::DataType::QuantisedSymm16>();
+ armnn::DataType::QSymmS16>();
}
BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
MultiplicationQueueDescriptor,
MultiplicationLayer,
- armnn::DataType::QuantisedSymm16>();
+ armnn::DataType::QSymmS16>();
}
BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
DivisionQueueDescriptor,
DivisionLayer,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
DivisionQueueDescriptor,
DivisionLayer,
- armnn::DataType::QuantisedSymm16>();
+ armnn::DataType::QSymmS16>();
}
template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
{
- RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedAsymm8>
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
{
- RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedAsymm8>
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
(DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
{
- RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
{
- RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QuantisedSymm16>
+ RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
(DataLayout::NHWC);
}
auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
// Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
- float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
CheckInputOutput(std::move(workload),
TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
TensorInfo({ 3, 7 }, DataType, outputQScale));
BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
{
- RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
{
- RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
}
template <typename NormalizationWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
template <typename Pooling2dWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
- RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
- RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
{
- RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+ RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
{
- RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
+ RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
template <typename SoftmaxWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
{
- RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
{
- RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
}
template <typename SplitterWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
- RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
}
template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
{
- RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QuantisedAsymm8>();
+ RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
}
template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
{
RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
- armnn::DataType::QuantisedAsymm8>();
+ armnn::DataType::QAsymmU8>();
}
template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
- RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
{
- RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+ RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
BOOST_AUTO_TEST_CASE(CreateRsqrtUint8)
{
- RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateRsqrtQsymm16)
{
- RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateRsqrtTest<RefRsqrtWorkload, armnn::DataType::QSymmS16>();
}
template <typename BatchToSpaceNdWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
{
- RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
{
- RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
}
template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
{
- RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NCHW);
+ RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
{
- RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedSymm16>(DataLayout::NHWC);
+ RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
{
- RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+ RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
{
- RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+ RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
template <typename ReshapeWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
{
- RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
{
- RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
}
template <typename ConcatWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
}
BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
- RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
+ RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
template <typename ConstantWorkloadType, armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
{
- RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 });
+ RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
}
BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
{
- RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedSymm16>({ 2, 3, 2, 10 });
+ RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
}
BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
{
- RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedAsymm8);
+ RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
}
BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
{
- RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QuantisedSymm16);
+ RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
}
BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
{
BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
- armnn::DataType::QuantisedAsymm8),
+ armnn::DataType::QAsymmU8),
armnn::InvalidArgumentException);
}
BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
{
BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
- armnn::DataType::QuantisedSymm16),
+ armnn::DataType::QSymmS16),
armnn::InvalidArgumentException);
}
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
{
- RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+ RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
{
- RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+ RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
template <armnn::DataType DataType>
BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
- RefCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+ RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
{
- RefCreateStackWorkloadTest<armnn::DataType::QuantisedSymm16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+ RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8)
{
- AbsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ AbsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16)
{
- AbsEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ AbsEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
}
// Constant
softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
// Sets the tensors in the network.
- TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
inputTensorInfo.SetQuantizationOffset(100);
inputTensorInfo.SetQuantizationScale(10000.0f);
input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
outputTensorInfo.SetQuantizationOffset(0);
outputTensorInfo.SetQuantizationScale(1.0f/255.0f);
softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1 });
- ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Equal,
expectedOutput);
}
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ComparisonSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
const std::vector<uint8_t > expectedOutput({ 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0 });
- ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Equal,
expectedOutput);
}
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ComparisonBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends,
+ ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
ComparisonOperation::Greater,
expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NHWCTest)
{
- BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NHWCTest)
{
- BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NCHWTest)
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NCHWTest)
{
- BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NCHWTest)
{
- BatchToSpaceNdEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest)
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NHWCTest)
{
- BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest)
{
- BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest)
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NCHWTest)
{
- BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest)
{
- BatchToSpaceNdComplexEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test)
{
- ConcatDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test)
{
- ConcatDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test)
{
- ConcatDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test)
{
- ConcatDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
BOOST_AUTO_TEST_CASE(RefGatherUint8Test)
{
- GatherEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ GatherEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefGatherInt16Test)
{
- GatherEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ GatherEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefGatherMultiDimFloatTest)
BOOST_AUTO_TEST_CASE(RefGatherMultiDimUint8Test)
{
- GatherMultiDimEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ GatherMultiDimEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefGatherMultiDimInt16Test)
{
- GatherMultiDimEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ GatherMultiDimEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
}
// DepthToSpace
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
{
- DepthToSpaceEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// Dequantize
BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
{
- DequantizeEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
{
- DequantizeEndToEndOffset<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleInt16Test)
{
- DequantizeEndToEndSimple<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ DequantizeEndToEndSimple<armnn::DataType::QSymmS16>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetInt16Test)
{
- DequantizeEndToEndOffset<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ DequantizeEndToEndOffset<armnn::DataType::QSymmS16>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsTest)
QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
QuantizeData(qScores.data(), scores.data(), scoresInfo);
QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+ DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
qScores, qAnchors,
1.0f, 1, 0.01f, 0, 0.5f, 0);
}
QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
QuantizeData(qScores.data(), scores.data(), scoresInfo);
QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
- DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, qBoxEncodings,
+ DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
qScores, qAnchors,
1.0f, 1, 0.01f, 0, 0.5f, 0);
}
BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestUint8)
{
- PreluEndToEndPositiveTest<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestQSymm16)
{
- PreluEndToEndPositiveTest<armnn::DataType::QuantisedSymm16>(defaultBackends);
+ PreluEndToEndPositiveTest<armnn::DataType::QSymmS16>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSpaceToDepthNhwcEndToEndTest1)
BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndUint8Test)
{
- Splitter1dEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndTest)
BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndUint8Test)
{
- Splitter2dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndUint8Test)
{
- Splitter2dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndTest)
BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndUint8Test)
{
- Splitter3dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndUint8Test)
{
- Splitter3dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndUint8Test)
{
- Splitter3dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndTest)
BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndUint8Test)
{
- Splitter4dDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndUint8Test)
{
- Splitter4dDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndUint8Test)
{
- Splitter4dDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndUint8Test)
{
- Splitter4dDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
// TransposeConvolution2d
BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NchwTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NchwTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NhwcTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NhwcTest)
{
- TransposeConvolution2dEndToEnd<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
+ TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NchwTest)
{
- ResizeBilinearEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NchwTest)
{
- ResizeBilinearEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNhwcTest)
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NhwcTest)
{
- ResizeBilinearEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NhwcTest)
{
- ResizeBilinearEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// Resize NearestNeighbor
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NchwTest)
{
- ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NCHW);
+ ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NchwTest)
{
- ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NCHW);
+ ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNhwcTest)
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NhwcTest)
{
- ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, armnn::DataLayout::NHWC);
+ ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}
BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest)
{
- ResizeNearestNeighborEndToEnd<armnn::DataType::QuantisedSymm16>(defaultBackends, armnn::DataLayout::NHWC);
+ ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}
// InstanceNormalization
BOOST_AUTO_TEST_CASE(RefArgMaxSimpleUint8Test)
{
- ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
BOOST_AUTO_TEST_CASE(RefArgMinSimpleUint8Test)
{
- ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Uint8Test)
{
- ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
BOOST_AUTO_TEST_CASE(RefArgMinAxis0Uint8Test)
{
- ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Uint8Test)
{
- ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
BOOST_AUTO_TEST_CASE(RefArgMinAxis1Uint8Test)
{
- ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Uint8Test)
{
- ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
BOOST_AUTO_TEST_CASE(RefArgMinAxis2Uint8Test)
{
- ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Uint8Test)
{
- ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
BOOST_AUTO_TEST_CASE(RefArgMinAxis3Uint8Test)
{
- ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+ ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}
#if !defined(__ANDROID__)
BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference)
{
armnn::RefWorkloadFactory factory;
- IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
+ IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}
BOOST_AUTO_TEST_CASE(IsLayerSupportedInt16Reference)
{
armnn::RefWorkloadFactory factory;
- IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QuantisedSymm16>(&factory);
+ IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS16>(&factory);
}
BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference)
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Uint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcUint8,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3Int16,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcInt16,
- Convolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Uint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcUint8,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3Int16,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcInt16,
- Convolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Int16,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcInt16,
- Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8,
- DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcUint8,
- DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int16,
- DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt16,
- DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
- DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcUint8,
- DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedAsymm8, DataType::Signed32>,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int16,
- DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt16,
- DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QuantisedSymm16, DataType::Signed32>,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QSymmS16, DataType::Signed32>,
false,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4,
// Fully Connected
ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedQSymm16, FullyConnectedTest<DataType::QuantisedSymm16>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_CASE(FullyConnectedQSymm16, FullyConnectedTest<DataType::QSymmS16>, false)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QuantisedAsymm8>, true)
-ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedQSymm16, FullyConnectedTest<DataType::QuantisedSymm16>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true)
+ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedQSymm16, FullyConnectedTest<DataType::QSymmS16>, true)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentQParams, ConcatUint8DifferentQParamsTest)
ARMNN_AUTO_TEST_CASE(ConcatUint16, ConcatUint16Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentInputOutputQParam,
- ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>, true)
+ ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, true)
ARMNN_AUTO_TEST_CASE(ConcatInt16DifferentInputOutputQParam,
- ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>, true)
+ ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>, true)
// Add
ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
SimpleResizeBilinearTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ SimpleResizeBilinearTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
ResizeBilinearNopTest<DataType::Float32>,
ResizeBilinearNopTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(esizeBilinearNopUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ ResizeBilinearNopTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
ResizeBilinearSqMinTest<DataType::Float32>,
ResizeBilinearSqMinTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ ResizeBilinearSqMinTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
ResizeBilinearMinTest<DataType::Float32>,
ResizeBilinearMinTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ ResizeBilinearMinTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMag,
ResizeBilinearMagTest<DataType::Float32>,
ResizeBilinearMagTest<DataType::Float16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
- ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16,
- SimpleResizeBilinearTest<DataType::QuantisedSymm16>,
+ ResizeBilinearMagTest<DataType::QSymmS16>,
DataLayout::NCHW)
// Resize Bilinear - NHWC
ResizeBilinearNopTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ ResizeBilinearNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
SimpleResizeBilinearTest<DataType::Float32>,
SimpleResizeBilinearTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
- SimpleResizeBilinearTest<DataType::QuantisedAsymm8>,
+ SimpleResizeBilinearTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ SimpleResizeBilinearTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
ResizeBilinearSqMinTest<DataType::Float32>,
ResizeBilinearSqMinTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
- ResizeBilinearSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ ResizeBilinearSqMinTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
ResizeBilinearMinTest<DataType::Float32>,
ResizeBilinearMinTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
- ResizeBilinearMinTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ ResizeBilinearMinTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
ResizeBilinearMagTest<DataType::Float32>,
ResizeBilinearMagTest<DataType::Float16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
- ResizeBilinearMagTest<DataType::QuantisedAsymm8>,
+ ResizeBilinearMagTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16Nhwc,
- ResizeBilinearNopTest<DataType::QuantisedSymm16>,
+ ResizeBilinearMagTest<DataType::QSymmS16>,
DataLayout::NHWC)
// Resize NearestNeighbor - NCHW
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(esizeNearestNeighborNopUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborSqMinTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborMinTest<DataType::QSymmS16>,
DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NCHW, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
- SimpleResizeNearestNeighborTest<DataType::QuantisedSymm16>,
+ SimpleResizeNearestNeighborTest<DataType::QSymmS16>,
DataLayout::NCHW)
// Resize NearestNeighbor - NHWC
ResizeNearestNeighborNopTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
SimpleResizeNearestNeighborTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
- SimpleResizeNearestNeighborTest<DataType::QuantisedAsymm8>,
+ SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
ResizeNearestNeighborSqMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
- ResizeNearestNeighborSqMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborSqMinTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
ResizeNearestNeighborMinTest<DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
- ResizeNearestNeighborMinTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
ResizeNearestNeighborMagTest<DataType::Float32>,
DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
- ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
+ ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
DataLayout::NHWC, 0.10f, 50, 0.11f, 20)
ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16Nhwc,
- ResizeNearestNeighborNopTest<DataType::QuantisedSymm16>,
+ ResizeNearestNeighborNopTest<DataType::QSymmS16>,
DataLayout::NHWC)
// Fake Quantization
ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest)
ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QuantisedSymm16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_CASE(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_CASE(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_CASE(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
// Constant
ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
// Floor
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(SimpleFloorFloat16, SimpleFloorTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(SimpleFloorQuantisedSymm16, SimpleFloorTest<DataType::QSymmS16>)
// Reshape
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedAsymm8, SimpleReshapeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimpleReshapeQuantisedSymm16, SimpleReshapeTest<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(Reshape5d, Reshape5dTest<DataType::Float32>)
// Rsqrt
ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(Rsqrt2dFloat16, Rsqrt2dTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(Rsqrt3dFloat16, Rsqrt3dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedSymm16, Rsqrt3dTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedAsymm8, Rsqrt2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedAsymm8, Rsqrt3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Rsqrt2dQuantisedSymm16, Rsqrt2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedSymm16, Rsqrt3dTest<DataType::QSymmS16>)
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(SimplePermuteQSymm16, SimplePermuteTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet1Test, PermuteValueSet1Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet2Test, PermuteValueSet2Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet3Test, PermuteValueSet3Test<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(SimplePermuteQSymm16, SimplePermuteTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet1Test, PermuteValueSet1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet2Test, PermuteValueSet2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(PermuteQSymm16ValueSet3Test, PermuteValueSet3Test<DataType::QSymmS16>)
// Lstm
BOOST_AUTO_TEST_CASE(LstmUtilsZeroVector) {
ARMNN_AUTO_TEST_CASE(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QuantisedAsymm8>)
-
-ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedSymm16, MeanSimpleTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedSymm16, MeanSimpleAxisTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedSymm16, MeanKeepDimsTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedSymm16, MeanMultipleDimsTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedSymm16, MeanVts1Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedSymm16, MeanVts2Test<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedSymm16, MeanVts3Test<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
+
+ARMNN_AUTO_TEST_CASE(MeanSimpleQuantisedSymm16, MeanSimpleTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanSimpleAxisQuantisedSymm16, MeanSimpleAxisTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanKeepDimsQuantisedSymm16, MeanKeepDimsTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanMultipleDimsQuantisedSymm16, MeanMultipleDimsTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanVts1QuantisedSymm16, MeanVts1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanVts2QuantisedSymm16, MeanVts2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedSymm16, MeanVts3Test<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(AdditionAfterMaxPool, AdditionAfterMaxPoolTest)
ARMNN_AUTO_TEST_CASE(ArgMaxHeightSigned32, ArgMaxHeightTest<DataType::Signed32>)
ARMNN_AUTO_TEST_CASE(ArgMinWidthSigned32, ArgMinWidthTest<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest<DataType::QuantisedAsymm8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymm8, ArgMaxChannelTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedSymm16, ArgMaxSimpleTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedSymm16, ArgMinSimpleTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedSymm16, ArgMinChannelTest<DataType::QuantisedSymm16>)
-ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedSymm16, ArgMaxChannelTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(ArgMaxSimpleQuantisedSymm16, ArgMaxSimpleTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(ArgMinSimpleQuantisedSymm16, ArgMinSimpleTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannelQuantisedSymm16, ArgMinChannelTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannelQuantisedSymm16, ArgMaxChannelTest<DataType::QSymmS16>)
// Space To Batch Nd
ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleFloat32, SpaceToBatchNdSimpleFloat32Test)
// Batch To Space Nd (NHWC)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_6, BatchToSpaceNdNhwcTest6<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcFloat16_7, BatchToSpaceNdNhwcTest7<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint4, BatchToSpaceNdNhwcTest4<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint5, BatchToSpaceNdNhwcTest5<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint6, BatchToSpaceNdNhwcTest6<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcUint7, BatchToSpaceNdNhwcTest7<DataType::QAsymmU8>)

ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_1, BatchToSpaceNdNhwcTest1<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_2, BatchToSpaceNdNhwcTest2<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_3, BatchToSpaceNdNhwcTest3<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_4, BatchToSpaceNdNhwcTest4<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_5, BatchToSpaceNdNhwcTest5<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_6, BatchToSpaceNdNhwcTest6<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNhwcQsymm16_7, BatchToSpaceNdNhwcTest7<DataType::QSymmS16>)
// Batch To Space Nd (NCHW)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_1, BatchToSpaceNdNchwTest1<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_2, BatchToSpaceNdNchwTest2<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_6, BatchToSpaceNdNchwTest6<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwFloat16_7, BatchToSpaceNdNchwTest7<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint4, BatchToSpaceNdNchwTest4<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint5, BatchToSpaceNdNchwTest5<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint6, BatchToSpaceNdNchwTest6<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwUint7, BatchToSpaceNdNchwTest7<DataType::QAsymmU8>)

ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_1, BatchToSpaceNdNchwTest1<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_2, BatchToSpaceNdNchwTest2<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_3, BatchToSpaceNdNchwTest3<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_4, BatchToSpaceNdNchwTest4<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_5, BatchToSpaceNdNchwTest5<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_6, BatchToSpaceNdNchwTest6<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(BatchToSpaceNdNchwQsymm16_7, BatchToSpaceNdNchwTest7<DataType::QSymmS16>)
// DepthToSpace
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
// SpaceToDepth
ARMNN_AUTO_TEST_CASE(SpaceToDepthNchwAsymmQ8, SpaceToDepthNchwAsymmQ8Test)
// Abs
ARMNN_AUTO_TEST_CASE(AbsZero, AbsZeroTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(Abs2dFloat16, Abs2dTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(Abs3dFloat16, Abs3dTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(Abs2dQuantisedAsymm8, Abs2dTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Abs3dQuantisedAsymm8, Abs3dTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(Abs2dQuantisedSymm16, Abs2dTest<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE(Abs3dQuantisedSymm16, Abs3dTest<DataType::QSymmS16>)
// Detection PostProcess
BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsFloat)
// Regular NMS on QAsymmU8-quantized inputs, executed on the reference backend.
BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8)
{
    DetectionPostProcessRegularNmsQuantizedTest<
        RefWorkloadFactory, DataType::QAsymmU8>();
}
// Fast NMS on QAsymmU8-quantized inputs.
BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsUint8)
{
    // Fixed copy/paste slip: this test was invoking the *Regular* NMS helper,
    // so the fast-NMS path was never exercised for QAsymmU8 (the Int16 Fast
    // test below correctly uses the Fast helper).
    DetectionPostProcessFastNmsQuantizedTest<
        RefWorkloadFactory, DataType::QAsymmU8>();
}
// Regular NMS on QSymmS16-quantized inputs.
BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt16)
{
    DetectionPostProcessRegularNmsQuantizedTest<
        RefWorkloadFactory, DataType::QSymmS16>();
}
// Fast NMS on QSymmS16-quantized inputs.
BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
{
    DetectionPostProcessFastNmsQuantizedTest<
        RefWorkloadFactory, DataType::QSymmS16>();
}
// Dequantize
// PReLU
ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PreluFloat16, PreluTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(PreluInt16, PreluTest<DataType::QSymmS16>)
// Slice
ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nchw,
                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dUint8Nhwc,
                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt16Nchw,
                     SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dInt16Nhwc,
                     SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NHWC) // was NCHW: the Int16Nhwc test must run NHWC, matching its name
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
                     SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt16Nchw,
                     SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedSimpleTransposeConvolution2dInt16Nhwc,
                     SimpleTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NHWC) // was NCHW: the Int16Nhwc test must run NHWC, matching its name
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nchw,
                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dUint8Nhwc,
                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt16Nchw,
                     PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(PaddedTransposeConvolution2dInt16Nhwc,
                     PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NHWC) // was NCHW: the Int16Nhwc test must run NHWC, matching its name
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
                     PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt16Nchw,
                     PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedPaddedTransposeConvolution2dInt16Nhwc,
                     PaddedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NHWC) // was NCHW: the Int16Nhwc test must run NHWC, matching its name
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nchw,
                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dUint8Nhwc,
                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt16Nchw,
                     StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(StridedTransposeConvolution2dInt16Nhwc,
                     StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NHWC) // was NCHW: the Int16Nhwc test must run NHWC, matching its name
true,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nchw,
                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
                     StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     true,
                     DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt16Nchw,
                     StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(UnbiasedStridedTransposeConvolution2dInt16Nhwc,
                     StridedTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     true,
                     DataLayout::NHWC) // was NCHW: the Int16Nhwc test must run NHWC, matching its name
MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nchw,
                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dUint8Nhwc,
                     MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
                     DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt16Nchw,
                     MultiChannelTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(MultiChannelTransposeConvolution2dInt16Nhwc,
                     MultiChannelTransposeConvolution2dTest<DataType::QSymmS16, DataType::Signed32>,
                     DataLayout::NHWC) // was NCHW: the Int16Nhwc test must run NHWC, matching its name
ARMNN_AUTO_TEST_CASE(TransposeConvolution2dPerAxisQuantTestNchw,
params.second,
params.first);
}
- case DataType::QuantisedAsymm8:
+ case DataType::QAsymmU8:
{
return std::make_unique<QASymm8Decoder>(
static_cast<const uint8_t*>(data),
info.GetQuantizationScale(),
info.GetQuantizationOffset());
}
- case DataType::QuantisedSymm16:
+ case DataType::QSymmS16:
{
return std::make_unique<QSymm16Decoder>(
static_cast<const int16_t*>(data),
params.second,
params.first);
}
- case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::QAsymmU8:
{
return std::make_unique<QASymm8Encoder>(
static_cast<uint8_t*>(data),
info.GetQuantizationScale(),
info.GetQuantizationOffset());
}
- case armnn::DataType::QuantisedSymm16:
+ case armnn::DataType::QSymmS16:
{
return std::make_unique<QSymm16Encoder>(
static_cast<int16_t*>(data),
// Explicit instantiations for every data type RefDebugWorkload supports.
template class RefDebugWorkload<DataType::Float16>;
template class RefDebugWorkload<DataType::Float32>;
template class RefDebugWorkload<DataType::QAsymmU8>;
template class RefDebugWorkload<DataType::QSymmS16>;
} // namespace armnn
// Convenience aliases matching the instantiations above.
using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QAsymmU8>;
using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QSymmS16>;
} // namespace armnn
// Explicit instantiations for every data type RefPadWorkload supports.
template class RefPadWorkload<DataType::Float32>;
template class RefPadWorkload<DataType::Float16>;
template class RefPadWorkload<DataType::QAsymmU8>;
template class RefPadWorkload<DataType::QSymmS16>;
} //namespace armnn
// Convenience aliases matching the instantiations above.
using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>;
using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>;
using RefPadQSymm16Workload = RefPadWorkload<DataType::QSymmS16>;
} //namespace armnn
// Explicit instantiations for every data type RefPermuteWorkload supports.
template class RefPermuteWorkload<DataType::Float16>;
template class RefPermuteWorkload<DataType::Float32>;
template class RefPermuteWorkload<DataType::QAsymmU8>;
template class RefPermuteWorkload<DataType::QSymmS16>;
} //namespace armnn
// Convenience aliases matching the instantiations above.
using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>;
using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>;
using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QAsymmU8>;
using RefPermuteQSymm16Workload = RefPermuteWorkload<DataType::QSymmS16>;
} //namespace armnn
switch(m_TargetType)
{
// Dispatch on the (renamed) quantized target type.
case DataType::QAsymmU8:
{
QuantizeImpl<uint8_t>(input, output, m_NumElements, m_Scale, m_Offset);
break;
QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, m_Offset);
break;
}
case DataType::QSymmS16:
{
// Symmetric 16-bit quantization always uses a zero offset.
QuantizeImpl<int16_t>(input, output, m_NumElements, m_Scale, 0);
break;
}
template<>
-inline auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
+inline auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
const float& quantizationScale,
const int32_t& quantizationOffset)
{
}
else if (m_OutputType == "qasymm8")
{
- return armnn::DataType::QuantisedAsymm8;
+ return armnn::DataType::QAsymmU8;
}
else
{
imageDataContainers.push_back(PrepareImageTensor<int>(
imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
break;
case armnn::DataType::QAsymmU8:
imageDataContainers.push_back(PrepareImageTensor<uint8_t>(
imagePath, newWidth, newHeight, normParams, batchSize, outputLayout));
break;
case armnn::DataType::Signed32:
normParams.mean = { 128.0, 128.0, 128.0 };
break;
// QAsymmU8 keeps the default normalization parameters.
case armnn::DataType::QAsymmU8:
default:
break;
}
inputTensorDataLayout));
outputDataContainers = { vector<int>(outputNumElements) };
break;
case armnn::DataType::QAsymmU8:
inputDataContainers.push_back(
PrepareImageTensor<uint8_t>(imagePath.string(),
inputTensorWidth, inputTensorHeight,
}
template<>
-auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream)
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
{
return ParseArrayImpl<uint8_t>(stream,
[](const std::string& s) { return boost::numeric_cast<uint8_t>(std::stoi(s)); });
}
template<>
-auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
const float& quantizationScale,
const int32_t& quantizationOffset)
{
const int qOffset = qParams.value().second;
tensorData = readFromFile ?
ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
}
else
{
else if (dataTypeStr.compare("qasymm8") == 0)
{
tensorData = readFromFile ?
ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
}
else
{