m_Value = Float32ToBFloat16(v).Val();
}
+ operator float() const
+ {
+ return ToFloat32();
+ }
+
+ BFloat16& operator=(const BFloat16& other)
+ {
+ m_Value = other.Val();
+ return *this;
+ }
+
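For illustration only (a sketch, not lines from this patch; assumes the class's existing default constructor), what the operators above enable:

    armnn::BFloat16 b;
    b = 1.5f;               // operator=(float): stores 1.5f as bfloat16 bits (exactly representable)
    float f = b;            // the new operator float(): widens back to a 32-bit float
    armnn::BFloat16 c;
    c = b;                  // the new copy assignment: copies the raw 16-bit value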
BFloat16& operator=(float v)
{
m_Value = Float32ToBFloat16(v).Val();
return *this;
}
- bool operator==(const float& r) const
- {
- return ToFloat32() == r;
- }
+ bool operator==(const BFloat16& r) const
+ {
+ return m_Value == r.Val();
+ }
-
static BFloat16 Float32ToBFloat16(const float v)
{
if (std::isnan(v))
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
+#include <BFloat16.hpp>
#include <Half.hpp>
#include <initializer_list>
}
};
+template<>
+struct SelectiveQuantizer<armnn::BFloat16, false>
+{
+ static armnn::BFloat16 Quantize(float value, float scale, int32_t offset)
+ {
+ armnn::IgnoreUnused(scale, offset);
+ return armnn::BFloat16(value);
+ }
+
+ static float Dequantize(armnn::BFloat16 value, float scale, int32_t offset)
+ {
+ armnn::IgnoreUnused(scale, offset);
+ return value;
+ }
+};
+
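For illustration only (a sketch, not lines from this patch; qualify names with the header's namespace as needed): because the specialization above ignores scale and offset, quantize/dequantize for BFloat16 is a plain float -> bfloat16 -> float round trip, losing only bfloat16 precision.

    float v = 3.14159f;
    armnn::BFloat16 q = SelectiveQuantizer<armnn::BFloat16, false>::Quantize(v, 1.0f, 0);
    float d = SelectiveQuantizer<armnn::BFloat16, false>::Dequantize(q, 1.0f, 0); // d ~= 3.140625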
template<typename T>
T SelectiveQuantize(float value, float scale, int32_t offset)
{
switch (dataType)
{
+
case DataType::Float16:
return MakeWorkloadForType<Float16Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Float32:
return MakeWorkloadForType<Float32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Boolean:
return MakeWorkloadForType<BooleanWorkload>::Func(descriptor, info, std::forward<Args>(args)...);
+ case DataType::BFloat16:
case DataType::QSymmS16:
return nullptr;
default:
{
switch (inputDataType)
{
+ case DataType::BFloat16:
+ return DataType::BFloat16;
case DataType::Float16:
return DataType::Float16;
case DataType::Float32:
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmS8,
std::vector<DataType> supportedInputTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmS8,
// Check the supported data types
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::Boolean,
// Check the supported data types
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::Boolean,
// Check the supported data types
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::Boolean,
// Check the supported data types
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmS8,
// Check the supported data types
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmS8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::QAsymmU8,
DataType::QAsymmS8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::QAsymmS8,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::QAsymmU8,
DataType::QAsymmS8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmS8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmS8,
// Check the supported data types
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16
};
// Check the supported data types
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
};
// Check the supported data types
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::Signed32,
// Check the supported data types
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::Signed32,
std::vector<DataType> supportedTypes =
{
- DataType::Float16,
- DataType::Float32,
- DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::BFloat16,
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QSymmS16
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QSymmS16
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16,
- DataType::Float16
+ DataType::Float16,
+ DataType::BFloat16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16,
- DataType::Float16
+ DataType::Float16,
+ DataType::BFloat16
};
ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::Signed32,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QSymmS8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::Signed32,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
const std::vector<DataType> supportedInputTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16
};
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16
std::vector<DataType> supportedTypes
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
std::vector<DataType> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
TensorInfo biasInfo;
const TensorInfo * biasInfoPtr = nullptr;
+ static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
// If biases are not enabled pass a dummy tensorinfo for the validation
switch(input.GetDataType())
{
+ case DataType::BFloat16:
+ {
+ biasInfoPtr = &dummyBFloat16Bias;
+ break;
+ }
case DataType::Float16:
{
biasInfoPtr = &dummyFloat16Bias;
switch(weightsType.value())
{
+ case armnn::DataType::BFloat16:
case armnn::DataType::Float16:
case armnn::DataType::Float32:
return weightsType;
return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
}
+LayerTestResult<BFloat16, 3> ConcatBFloat16Test(
+ IWorkloadFactory& workloadFactory,
+ const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
+}
+
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
#include "LayerTestResult.hpp"
-#include <ResolveType.hpp>
+#include <BFloat16.hpp>
#include <Half.hpp>
+#include <ResolveType.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<armnn::BFloat16, 3> ConcatBFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<armnn::Half, 3> ConcatFloat16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
//
// Explicit template specializations
//
+template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
+Convolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
bool,
armnn::DataLayout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
+Convolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
armnn::IWorkloadFactory&,
bool,
armnn::DataLayout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
+Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory &workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory &workloadFactory,
bool biasEnabled,
const armnn::DataLayout layout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
+DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory&,
bool,
armnn::DataLayout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
+DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory&,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
+ bool,
+ armnn::DataLayout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory&,
bool,
armnn::DataLayout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
+DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory &workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory &workloadFactory,
bool biasEnabled,
const armnn::DataLayout layout);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
+DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
+ armnn::IWorkloadFactory &workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
+ bool biasEnabled,
+ const armnn::DataLayout layout);
+
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>(
armnn::IWorkloadFactory &workloadFactory,
{
return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
+
+LayerTestResult<armnn::BFloat16, 2> PadBFloat162dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad2dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
+}
+
+LayerTestResult<armnn::BFloat16, 2> PadBFloat162dCustomPaddingTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad2dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
+}
+
+LayerTestResult<armnn::BFloat16, 3> PadBFloat163dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad3dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
+}
+
+LayerTestResult<armnn::BFloat16, 4> PadBFloat164dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ return Pad4dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
+}
LayerTestResult<float, 4> PadFloat324dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<armnn::BFloat16, 2> PadBFloat162dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<armnn::BFloat16, 2> PadBFloat162dCustomPaddingTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<armnn::BFloat16, 3> PadBFloat163dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<armnn::BFloat16, 4> PadBFloat164dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
+ float qScale = 0.5f;
+ int32_t qOffset = 5;
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(0.5f);
- inputTensorInfo.SetQuantizationOffset(5);
- outputTensorInfo.SetQuantizationScale(0.5f);
- outputTensorInfo.SetQuantizationOffset(5);
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
}
- std::vector<T> input = std::vector<T>(
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
{
1, 2,
3, 4,
5, 6,
7, 8
- });
+ },
+ qScale, qOffset);
- std::vector<T> outputExpected = std::vector<T>(
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
{
1, 5, 2, 6,
3, 7, 4, 8
- });
+ },
+ qScale, qOffset);
return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
descriptor, inputTensorInfo,
outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
+ float qScale = 0.5f;
+ int32_t qOffset = 5;
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(0.5f);
- inputTensorInfo.SetQuantizationOffset(5);
- outputTensorInfo.SetQuantizationScale(0.5f);
- outputTensorInfo.SetQuantizationOffset(5);
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
}
- std::vector<T> input = std::vector<T>(
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
{
1, 2, 3,
11, 12, 13,
21, 22, 23,
31, 32, 33
- });
+ },
+ qScale, qOffset);
- std::vector<T> outputExpected = std::vector<T>(
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
{
1, 11, 21, 31,
2, 12, 22, 32,
3, 13, 23, 33
- });
+ },
+ qScale, qOffset);
return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
descriptor, inputTensorInfo,
outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
+ float qScale = 0.5f;
+ int32_t qOffset = 5;
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(0.5f);
- inputTensorInfo.SetQuantizationOffset(5);
- outputTensorInfo.SetQuantizationScale(0.5f);
- outputTensorInfo.SetQuantizationOffset(5);
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
}
- std::vector<T> input = std::vector<T>(
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
{
1, 11, 21, 31,
2, 12, 22, 32,
3, 13, 23, 33
- });
+ },
+ qScale, qOffset);
- std::vector<T> outputExpected = std::vector<T>(
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
{
1, 2, 3,
11, 12, 13,
21, 22, 23,
31, 32, 33,
- });
+ },
+ qScale, qOffset);
return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
descriptor, inputTensorInfo,
outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
+ float qScale = 0.5f;
+ int32_t qOffset = 5;
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(0.5f);
- inputTensorInfo.SetQuantizationOffset(5);
- outputTensorInfo.SetQuantizationScale(0.5f);
- outputTensorInfo.SetQuantizationOffset(5);
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
}
- std::vector<T> input = std::vector<T>(
- {
- 1, 2, 3,
- 11, 12, 13,
- 21, 22, 23,
- 31, 32, 33,
- 41, 42, 43,
- 51, 52, 53
- });
-
- std::vector<T> outputExpected = std::vector<T>(
- {
- 1, 11, 21, 31, 41, 51,
- 2, 12, 22, 32, 42, 52,
- 3, 13, 23, 33, 43, 53
- });
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 1, 2, 3,
+ 11, 12, 13,
+ 21, 22, 23,
+ 31, 32, 33,
+ 41, 42, 43,
+ 51, 52, 53
+ },
+ qScale, qOffset);
+
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
+ {
+ 1, 11, 21, 31, 41, 51,
+ 2, 12, 22, 32, 42, 52,
+ 3, 13, 23, 33, 43, 53
+ },
+ qScale, qOffset);
return SimplePermuteTestImpl<T>(workloadFactory, memoryManager,
descriptor, inputTensorInfo,
outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
+ float qScale = 0.5f;
+ int32_t qOffset = 5;
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(0.5f);
- inputTensorInfo.SetQuantizationOffset(5);
- outputTensorInfo.SetQuantizationScale(0.5f);
- outputTensorInfo.SetQuantizationOffset(5);
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
}
- std::vector<T> input = std::vector<T>(
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
{
1, 2,
3, 4,
5, 6,
7, 8
- });
+ },
+ qScale, qOffset);
- std::vector<T> outputExpected = std::vector<T>(
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
{
1, 5, 2, 6,
3, 7, 4, 8
- });
+ },
+ qScale, qOffset);
return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager,
descriptor, inputTensorInfo,
outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
+ float qScale = 0.5f;
+ int32_t qOffset = 5;
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(0.5f);
- inputTensorInfo.SetQuantizationOffset(5);
- outputTensorInfo.SetQuantizationScale(0.5f);
- outputTensorInfo.SetQuantizationOffset(5);
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
}
- std::vector<T> input = std::vector<T>(
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
{
1, 2, 3,
11, 12, 13,
21, 22, 23,
31, 32, 33
- });
+ },
+ qScale, qOffset);
- std::vector<T> outputExpected = std::vector<T>(
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
{
1, 11, 21, 31,
2, 12, 22, 32,
3, 13, 23, 33
- });
+ },
+ qScale, qOffset);
return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager,
descriptor, inputTensorInfo,
outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
+ float qScale = 0.5f;
+ int32_t qOffset = 5;
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(0.5f);
- inputTensorInfo.SetQuantizationOffset(5);
- outputTensorInfo.SetQuantizationScale(0.5f);
- outputTensorInfo.SetQuantizationOffset(5);
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
}
- std::vector<T> input = std::vector<T>(
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
{
1, 11, 21, 31,
2, 12, 22, 32,
3, 13, 23, 33
- });
+ },
+ qScale, qOffset);
- std::vector<T> outputExpected = std::vector<T>(
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
{
1, 2, 3,
11, 12, 13,
21, 22, 23,
31, 32, 33,
- });
+ },
+ qScale, qOffset);
return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager,
descriptor, inputTensorInfo,
outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
+ float qScale = 0.5f;
+ int32_t qOffset = 5;
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(0.5f);
- inputTensorInfo.SetQuantizationOffset(5);
- outputTensorInfo.SetQuantizationScale(0.5f);
- outputTensorInfo.SetQuantizationOffset(5);
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
}
- std::vector<T> input = std::vector<T>(
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
{
1, 2, 3,
11, 12, 13,
21, 22, 23,
31, 32, 33,
41, 42, 43,
51, 52, 53
- });
+ },
+ qScale, qOffset);
- std::vector<T> outputExpected = std::vector<T>(
+ std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(
{
1, 11, 21, 31, 41, 51,
2, 12, 22, 32, 42, 52,
3, 13, 23, 33, 43, 53
- });
+ },
+ qScale, qOffset);
return SimpleTransposeTestImpl<T>(workloadFactory, memoryManager,
descriptor, inputTensorInfo,
// Define supported types.
std::array<DataType,6> supportedTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmS8,
bool supported = true;
std::array<DataType,6> supportedTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmS8,
{
IgnoreUnused(descriptor);
- std::array<DataType, 4> supportedTypes =
+ std::array<DataType, 5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16,
{
IgnoreUnused(descriptor);
- std::array<DataType, 4> supportedTypes =
+ std::array<DataType, 5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
std::string outputTensorStr = "output";
// Define supported types.
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
- DataType::Float32,
- DataType::Float16,
- DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
IgnoreUnused(descriptor);
- std::array<DataType, 4> supportedInputTypes =
+ std::array<DataType, 5> supportedInputTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
- DataType::Float32,
- DataType::Float16,
- DataType::QAsymmU8,
- DataType::QAsymmS8,
- DataType::QSymmS16
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmU8,
+ DataType::QAsymmS8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- std::array<DataType,6> supportedTypes =
+ std::array<DataType,7> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Signed32,
DataType::QAsymmU8,
bool supported = true;
// Define supported types.
- std::array<DataType,6> supportedTypes =
+ std::array<DataType,7> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
if (biases.has_value())
{
- std::array<DataType,3> biasesSupportedTypes =
+ std::array<DataType,4> biasesSupportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::Signed32
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
bool supported = true;
// Define supported types.
- std::array<DataType,6> supportedTypes =
+ std::array<DataType,7> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QSymmS8,
if (biases.has_value())
{
- std::array<DataType,3> biasesSupportedTypes =
+ std::array<DataType,4> biasesSupportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::Signed32
supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
"Reference dequantize: per-axis quantized input not support .");
- std::array<DataType,2> supportedOutputTypes = {
+ std::array<DataType,3> supportedOutputTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16
};
bool supported = true;
- std::array<DataType,3> supportedInputTypes =
+ std::array<DataType,4> supportedInputTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16
{
bool supported = true;
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,5> supportedTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
{
IgnoreUnused(descriptor);
- std::array<DataType, 4> supportedTypes =
+ std::array<DataType, 5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
IgnoreUnused(output);
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QSymmS16
bool supported = true;
// Define supported types.
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
- DataType::Float32,
- DataType::Float16,
- DataType::QAsymmU8,
- DataType::QAsymmS8,
- DataType::QSymmS16
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmU8,
+ DataType::QAsymmS8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
if (descriptor.m_BiasEnabled)
{
// Defined supported types for bias
- std::array<DataType, 3>
+ std::array<DataType, 4>
supportedBiasTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::Signed32
armnn::Optional<std::string&> reasonIfUnsupported) const
{
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
{
IgnoreUnused(descriptor);
// Define supported types
- std::array<DataType, 2> supportedTypes =
+ std::array<DataType, 3> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16
};
{
IgnoreUnused(descriptor);
// Define supported types
- std::array<DataType, 4> supportedTypes =
+ std::array<DataType, 5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
{
IgnoreUnused(descriptor);
- std::array<DataType, 2> supportedTypes =
+ std::array<DataType, 3> supportedTypes =
{
- DataType::Float32,
- DataType::Float16
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16
};
bool supported = true;
bool supported = true;
- std::array<DataType,2> supportedTypes = {
+ std::array<DataType,3> supportedTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::QSymmS16
};
{
bool supported = true;
- std::array<DataType,5> supportedTypes = {
+ std::array<DataType,6> supportedTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmS8,
std::string meanLayerStr = "Mean";
std::string outputTensorStr = "output";
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
{
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
{
bool supported = true;
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,5> supportedTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
bool supported = true;
std::array<DataType,6> supportedTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
IgnoreUnused(descriptor);
// Define supported types
- std::array<DataType, 4> supportedTypes =
+ std::array<DataType, 5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType, 4> supportedTypes =
+ std::array<DataType, 5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmS8,
bool supported = true;
// Define supported input types.
- std::array<DataType,6> supportedInputTypes = {
+ std::array<DataType,7> supportedInputTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmS8,
// Define supported output types.
std::array<DataType,7> supportedOutputTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::Signed32,
Optional<std::string&> reasonIfUnsupported) const
{
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
{
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,5> supportedTypes =
+ std::array<DataType,6> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType, 3> supportedTypes =
+ std::array<DataType, 4> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16
{
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,6> supportedTypes =
+ std::array<DataType,7> supportedTypes =
{
- DataType::Float32,
- DataType::Float16,
- DataType::QSymmS8,
- DataType::QAsymmS8,
- DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QSymmS8,
+ DataType::QAsymmS8,
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
{
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
- DataType::Float32,
- DataType::Float16,
- DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
{
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
{
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16
{
bool supported = true;
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,5> supportedTypes = {
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
{
bool supported = true;
- std::array<DataType, 4> supportedTypes
+ std::array<DataType, 5> supportedTypes
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
IgnoreUnused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
- DataType::Float32,
- DataType::Float16,
- DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::QAsymmU8,
+ DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
if (biases.has_value())
{
- std::array<DataType,3> biasesSupportedTypes =
+ std::array<DataType,4> biasesSupportedTypes =
{
- DataType::Float32,
- DataType::Float16,
- DataType::Signed32
+ DataType::BFloat16,
+ DataType::Float32,
+ DataType::Float16,
+ DataType::Signed32
};
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
"Reference TransposeConvolution2d: biases is not a supported type.");
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType, 4> supportedTypes =
+ std::array<DataType, 5> supportedTypes =
{
+ DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
return IsDataType<DataType::Signed32>(info);
}
+bool IsBFloat16(const WorkloadInfo& info)
+{
+ return IsDataType<DataType::BFloat16>(info);
+}
+
bool IsFloat16(const WorkloadInfo& info)
{
return IsDataType<DataType::Float16>(info);
{
return std::make_unique<RefPadFloat16Workload>(descriptor, info);
}
+ else if (IsBFloat16(info))
+ {
+ return std::make_unique<RefPadBFloat16Workload>(descriptor, info);
+ }
return MakeWorkload<RefPadFloat32Workload, RefPadQAsymm8Workload>(descriptor, info);
}
{
return std::make_unique<RefPermuteQSymm16Workload>(descriptor, info);
}
+ else if (IsBFloat16(info))
+ {
+ return std::make_unique<RefPermuteBFloat16Workload>(descriptor, info);
+ }
return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}
{
return std::make_unique<RefTransposeQSymm16Workload>(descriptor, info);
}
+ else if (IsBFloat16(info))
+ {
+ return std::make_unique<RefTransposeBFloat16Workload>(descriptor, info);
+ }
return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload, RefTransposeQAsymm8Workload,
NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}
BOOST_CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));
}
+BOOST_AUTO_TEST_CASE(IsLayerSupportedBFloat16Reference)
+{
+ armnn::RefWorkloadFactory factory;
+ IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::BFloat16>(&factory);
+}
+
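For illustration only (a sketch, not lines from this patch; shapes arbitrary), the new BFloat16 case boils down to support queries like the IsAdditionSupported check shown above, now made with BFloat16 tensor infos:

    armnn::TensorInfo in0({ 1, 2, 2, 2 }, armnn::DataType::BFloat16);
    armnn::TensorInfo in1({ 1, 2, 2, 2 }, armnn::DataType::BFloat16);
    armnn::TensorInfo out({ 1, 2, 2, 2 }, armnn::DataType::BFloat16);
    armnn::RefLayerSupport supportChecker;
    std::string reasonNotSupported;
    BOOST_CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));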
BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Reference)
{
armnn::RefWorkloadFactory factory;
ARMNN_AUTO_TEST_CASE(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
+ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3BFloat16,
+ Convolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3NhwcBFloat16,
+ Convolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d3x3Dilation3x3,
Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3BFloat16,
+ Convolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3NhwcBFloat16,
+ Convolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x3x3Dilation3x3,
Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3BFloat16,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcBFloat16,
+ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
false,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3BFloat16,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16,
+ DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8,
DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
false,
DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3BFloat16,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16,
+ DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>,
+ false,
+ DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
false,
DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>,
false,
armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult4BFloat16,
+ DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>,
+ false,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dMult2BFloat16,
+ DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>,
+ false,
+ armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NCHW)
// Concat
ARMNN_AUTO_TEST_CASE(SimpleConcat, ConcatTest)
+ARMNN_AUTO_TEST_CASE(ConcatBFloat16, ConcatBFloat16Test)
ARMNN_AUTO_TEST_CASE(ConcatFloat16, ConcatFloat16Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8, ConcatUint8Test)
ARMNN_AUTO_TEST_CASE(ConcatUint8DifferentQParams, ConcatUint8DifferentQParamsTest)
ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat16_4, LogSoftmaxTest4<DataType::Float16>)
// Pad
+ARMNN_AUTO_TEST_CASE(PadBFloat162d, PadBFloat162dTest)
+ARMNN_AUTO_TEST_CASE(PadBFloat162dCustomPadding, PadBFloat162dCustomPaddingTest)
+ARMNN_AUTO_TEST_CASE(PadBFloat163d, PadBFloat163dTest)
+ARMNN_AUTO_TEST_CASE(PadBFloat164d, PadBFloat164dTest)
+
ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
ARMNN_AUTO_TEST_CASE(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE(PadFloat323d, PadFloat323dTest)
ARMNN_AUTO_TEST_CASE(Rsqrt3dQuantisedSymm16, Rsqrt3dTest<DataType::QSymmS16>)
// Permute
+ARMNN_AUTO_TEST_CASE(SimplePermuteBFloat16, SimplePermuteTest<DataType::BFloat16>)
+ARMNN_AUTO_TEST_CASE(PermuteBFloat16ValueSet1Test, PermuteValueSet1Test<DataType::BFloat16>)
+ARMNN_AUTO_TEST_CASE(PermuteBFloat16ValueSet2Test, PermuteValueSet2Test<DataType::BFloat16>)
+ARMNN_AUTO_TEST_CASE(PermuteBFloat16ValueSet3Test, PermuteValueSet3Test<DataType::BFloat16>)
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
// Transpose
+ARMNN_AUTO_TEST_CASE(SimpleTransposeBFloat16, SimpleTransposeTest<DataType::BFloat16>)
+ARMNN_AUTO_TEST_CASE(TransposeBFloat16ValueSet1Test, TransposeValueSet1Test<DataType::BFloat16>)
+ARMNN_AUTO_TEST_CASE(TransposeBFloat16ValueSet2Test, TransposeValueSet2Test<DataType::BFloat16>)
+ARMNN_AUTO_TEST_CASE(TransposeBFloat16ValueSet3Test, TransposeValueSet3Test<DataType::BFloat16>)
ARMNN_AUTO_TEST_CASE(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>)
ARMNN_AUTO_TEST_CASE(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
}
}
+template void Pad<BFloat16>(const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo,
+ std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+ const BFloat16* inputData,
+ BFloat16* outData,
+ const float padValue);
+
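For illustration only (a sketch, not lines from this patch; shapes, values, and namespace qualification are assumptions), a direct call matching the BFloat16 instantiation above, padding a 2x2 BFloat16 tensor by one element on each edge with a pad value of 0:

    armnn::TensorInfo inInfo({ 2, 2 }, armnn::DataType::BFloat16);
    armnn::TensorInfo outInfo({ 4, 4 }, armnn::DataType::BFloat16);
    std::vector<std::pair<unsigned int, unsigned int>> padList = { { 1, 1 }, { 1, 1 } };
    std::vector<armnn::BFloat16> in(4), out(16);
    armnn::Pad(inInfo, outInfo, padList, in.data(), out.data(), 0.0f);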
template void Pad<float>(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData, m_Data.m_Parameters.m_PadValue);
}
+template class RefPadWorkload<DataType::BFloat16>;
template class RefPadWorkload<DataType::Float32>;
template class RefPadWorkload<DataType::Float16>;
template class RefPadWorkload<DataType::QAsymmU8>;
void Execute() const override;
};
+using RefPadBFloat16Workload = RefPadWorkload<DataType::BFloat16>;
using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>;
using RefPadQAsymm8Workload = RefPadWorkload<DataType::QAsymmU8>;
src->Map(), dst->Map(), sizeof(T));
}
+template class RefPermuteWorkload<DataType::BFloat16>;
template class RefPermuteWorkload<DataType::Float16>;
template class RefPermuteWorkload<DataType::Float32>;
template class RefPermuteWorkload<DataType::QAsymmU8>;
void Execute() const override;
};
+using RefPermuteBFloat16Workload = RefPermuteWorkload<DataType::BFloat16>;
using RefPermuteFloat16Workload = RefPermuteWorkload<DataType::Float16>;
using RefPermuteFloat32Workload = RefPermuteWorkload<DataType::Float32>;
using RefPermuteQAsymm8Workload = RefPermuteWorkload<DataType::QAsymmU8>;
armnnUtils::Transpose(GetTensorInfo(src).GetShape(), mappings, src->Map(), dst->Map(), sizeof(T));
}
+template class RefTransposeWorkload<DataType::BFloat16>;
template class RefTransposeWorkload<DataType::Float16>;
template class RefTransposeWorkload<DataType::Float32>;
template class RefTransposeWorkload<DataType::QAsymmU8>;
void Execute() const override;
};
+using RefTransposeBFloat16Workload = RefTransposeWorkload<DataType::BFloat16>;
using RefTransposeFloat16Workload = RefTransposeWorkload<DataType::Float16>;
using RefTransposeFloat32Workload = RefTransposeWorkload<DataType::Float32>;
using RefTransposeQAsymm8Workload = RefTransposeWorkload<DataType::QAsymmU8>;