#include "LayerTests.hpp"
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
+#include "TypeUtils.hpp"
#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
static std::vector<float> Bias2({0, 2});
// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
if(biasEnabled)
{
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
return bias;
}
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
// Use common single-batch 3-channel 16x8 image.
- armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
// Use a 2-element batch with 3-channel 3x5 kernels.
- armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
1, 1, 1,
})));
// Expected output is 1 batch of a 2-channel 14x4 image.
- armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
- expectedOutput,
- qScale,
- qOffset,
- layout);
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout);
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
// Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
// Use common single-batch 3-channel 16x8 image.
- armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
// Use a 2-element batch of 3-channel 3x3 kernels.
- armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
1, 1, 1,
})));
// Expected output is 1 batch of a 2-channel 14x6 image.
- armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
- expectedOutput,
- qScale,
- qOffset,
- layout);
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
// Use a single-batch 1-channel 4x3 image (NHWC).
- armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
{
1, 5, 2, 3,
// Use a single 1-channel 3x3 kernel.
- armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
4, 5, 6,
0, 0, 0,
});
// Expected output is 1 batch of a 1-channel 4x3 image.
- armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
const std::vector<float> outputData =
{
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
- return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- boost::multi_array<T, 1>(),
- expectedOutput,
- dataLayout,
- qScale,
- qOffset);
+ return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ boost::multi_array<T, 1>(),
+ expectedOutput,
+ dataLayout,
+ qScale,
+ qOffset);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout& dataLayout)
{
// Input is a single-batch, 1 channel, 5x5 image.
- armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
{
1, 5, 2, 3, 5,
});
// Use a 3x3 kernel.
- armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
{
4, 5, 6,
});
// Expected output is a single-batch, 1 channel, 3x3 image.
- armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
const std::vector<T> outputData =
{
uint32_t strideX = 2;
uint32_t strideY = 2;
- return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- boost::multi_array<T, 1>(),
- expectedOutput,
- dataLayout,
- qScale,
- qOffset,
- padLeft,
- padTop,
- padRight,
- padBottom,
- strideX,
- strideY);
+ return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ boost::multi_array<T, 1>(),
+ expectedOutput,
+ dataLayout,
+ qScale,
+ qOffset,
+ padLeft,
+ padTop,
+ padRight,
+ padBottom,
+ strideX,
+ strideY);
}
LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}
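// Note: the QuantisedAsymm8 variants pass Signed32 as the bias data type (ArmnnBType), since
// quantized convolutions accumulate in int32 and therefore expect an int32 bias tensor.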
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory,
- memoryManager,
- 0.f,
- 0,
- biasEnabled,
- armnn::DataLayout::NHWC);
+ return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ 0.f,
+ 0,
+ biasEnabled,
+ armnn::DataLayout::NHWC);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3Stride2x2TestCommon<float>(workloadFactory,
- memoryManager,
- 0.f,
- 0,
- biasEnabled,
- layout);
+ return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ 0.f,
+ 0,
+ biasEnabled,
+ layout);
}
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
// Use a single-batch 1-channel 3x3 image as input.
- armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
11,21,31,
})));
// Use 1 batch of a 1-channel 2x2 kernel.
- armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-11,-21,
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
- armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
- expectedOutput,
- qScale,
- qOffset,
- layout,
- 1, // Padding left.
- 2, // Padding top.
- 3, // Padding right.
- 4); // Padding bottom.
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(false, qScale, qOffset),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout,
+ 1, // Padding left.
+ 2, // Padding top.
+ 3, // Padding right.
+ 4); // Padding bottom.
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
// Use a single-batch 1-channel 5x5 image as input.
- armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
11,21,31,41,51,
})));
// Use 1 batch of a 1-channel 4x4 kernel.
- armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-11,-21,-31,-41,
})));
// Expected output is 1 batch of a 1-channel 5x5 image.
- armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
std::vector<T> myVec(outputDesc.GetNumElements(), 0);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-5032, -7256, -9376, -6142, -3368,
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
memoryManager,
input,
kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
+ GetBias2<ArmnnBType>(false, qScale, qOffset),
expectedOutput,
qScale,
qOffset,
2); // Padding bottom.
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
// Use a single-batch 2-channel 5x5 image as input.
- armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
0, 1, 2, 3, 4,
})));
// Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
- armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
32, 31, 30, 29,
// Expected output is 1 batch of a 2-channel 5x5 image.
// Calculated using the Python TensorFlow library with strideX=1, strideY=1.
- armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
1062, 1580, 1850, 1530, 1117,
3100, 4352, 4452, 3517, 2465
})));
- return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
+ return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
memoryManager,
input,
kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
expectedOutput,
qScale,
qOffset,
1); // strideY
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
bool biasEnabled)
{
- armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
0, 25,
24, 49
})));
- armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
32, 31, 30, 29,
4, 3, 2, 1
})));
- armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
1062, 1550,
2457, 2465
})));
- return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
+ return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
memoryManager,
input,
kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
expectedOutput,
qScale,
qOffset,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(
- workloadFactory, memoryManager, layout, 0.0f, 0);
+ return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
+ <armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, layout, 0.0f, 0);
}
LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(
+ return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, layout, 0.0f, 0);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<float, float>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
+ return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dAsymmetricTestCommon<float>(
+ return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return Convolution1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
+ return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}
LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return Convolution1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
+ return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
}
LayerTestResult<float,4> CompareConvolution2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
- return CompareConvolution2dTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory);
+ return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, refWorkloadFactory);
}
-template<typename T>
-LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(
+LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
const armnn::DataLayout layout)
{
- return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, memoryManager, refWorkloadFactory, layout);
+ return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, refWorkloadFactory, layout);
}
-template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
- armnn::IWorkloadFactory&,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
- armnn::IWorkloadFactory&,
- const armnn::DataLayout);
-
-template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
- armnn::IWorkloadFactory&,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
- armnn::IWorkloadFactory&,
- const armnn::DataLayout);
+LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::DataLayout layout)
+{
+ return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, refWorkloadFactory, layout);
+}
LayerTestResult<float,4> SimpleNormalizationAcrossTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<float>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
}
LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
}
LayerTestResult<float,4> CompareNormalizationTest(
armnn::IWorkloadFactory& refWorkloadFactory,
float beta)
{
- return CompareSoftmaxTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory, beta);
+ return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, refWorkloadFactory, beta);
}
LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
float beta)
{
- return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, refWorkloadFactory, beta);
+ return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, refWorkloadFactory, beta);
}
std::vector<LayerTestResult<float,3>> SplitterTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<float>(workloadFactory, memoryManager);
+ return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 3> CopyViaSplitterTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
+ armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
{ 2., 3., 3., 4. }));
- armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
+ armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
-0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
+ armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
- armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
+ armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
{-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
-0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
+ armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
{2., 3., 3., 4.}));
- armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
+ armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
{{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
-0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
+ armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
if (armnn::IsQuantizedType<T>())
{
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
- armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
+ armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
if (armnn::IsQuantizedType<T>())
{
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, memoryManager, 2.f, 0);
+ return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 2.f, 0);
}
LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1333333f, 128);
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.1333333f, 128);
}
LayerTestResult<float,4> CompareAdditionTest(
}
namespace {
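    // Shared helper: runs the element-wise workload described by Descriptor (Equal, Greater, Maximum,
    // Minimum, ...) over the two input tensors and returns the computed output together with the
    // expected outValues; qScale/qOffset are applied only when ArmnnType is a quantized type.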
- template <typename Descriptor, typename dataType>
- LayerTestResult<dataType, 4> ElementwiseTestHelper
+ template <typename Descriptor, armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+ LayerTestResult<T, 4> ElementwiseTestHelper
(armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[4], std::vector<dataType> values0,
- const unsigned int shape1[4], std::vector<dataType> values1,
- const unsigned int outShape[4], std::vector<dataType> outValues,
+ const unsigned int shape0[4], std::vector<T> values0,
+ const unsigned int shape1[4], std::vector<T> values1,
+ const unsigned int outShape[4], std::vector<T> outValues,
float qScale = 0.0f, int qOffset = 0)
{
const size_t dimensionCount = 4;
- armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::GetDataType<dataType>()};
- armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::GetDataType<dataType>()};
- armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::GetDataType<dataType>()};
+ armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnType};
+ armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnType};
+ armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnType};
- auto input0 = MakeTensor<dataType, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<dataType, 4>(inputTensorInfo1, values1);
+ auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
+ auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
- if (armnn::IsQuantizedType<dataType>())
+ if (armnn::IsQuantizedType<T>())
{
inputTensorInfo0.SetQuantizationScale(qScale);
inputTensorInfo0.SetQuantizationOffset(qOffset);
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- LayerTestResult<dataType,4> ret(outputTensorInfo);
+ LayerTestResult<T,4> ret(outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- ret.outputExpected = MakeTensor<dataType, 4>(outputTensorInfo, outValues);
+ ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
return ret;
}
}
std::vector<float> output({ 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> EqualBroadcast1ElementTest(
std::vector<float> output({ 1, 0, 0, 0, 0, 0, 0, 0});
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
std::vector<float> output({ 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> EqualUint8Test(
std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
std::vector<float> output({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
std::vector<float> output({ 0, 1, 1, 1, 1, 1, 1, 1});
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
std::vector<float> output({ 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> GreaterUint8Test(
std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
4, 4, 4, 4, 5, 5, 5, 5 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
std::vector<float> output({ 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> MaximumUint8Test(
std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
4, 4, 4, 4, 5, 5, 5, 5 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
7, 10, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
1, 1, 2, 1, 2, 3 });
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, uint8_t>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
namespace {
}
}
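// Concatenates three 3-element inputs ({1,2,3}, {4,5,6}, {7,8,9}) into a single 9-element output.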
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concatenation1dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType);
auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
- armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType);
LayerTestResult<T, 1> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
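// Shared 2-D helper: concatenates three { 2, 3 } inputs along the dimension chosen by the caller,
// writing into a tensor described by outputTensorInfo (see the Dim0/Dim1 wrappers below).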
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const float qScale,
const int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType);
auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
-    LayerTestResult<T, 2> result =
-        Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
+    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
+
+    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
-    LayerTestResult<T, 2> result =
-        Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
+    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
+
+    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f,
10.0f, 11.0f, 12.0f,
}));
- armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType);
auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
4.0f, 5.0f, 6.0f,
7.0f, 8.0f, 9.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType);
auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 1
16.0f, 17.0f, 18.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
LayerTestResult<T, 2> result(outputTensorInfo);
std::vector<T> output;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f,
10.0f, 11.0f, 12.0f,
}));
- armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType);
auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType);
auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
9.0f,
18.0f
}));
- armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
LayerTestResult<T, 2> result(outputTensorInfo);
std::vector<T> output;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
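// Shared 3-D helper: concatenates three { 2, 3, 2 } inputs along the requested dimension into
// outputTensorInfo; the useSubtensor flag presumably controls whether the inputs are mapped as
// sub-tensors of the output when the backend supports it.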
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
-    LayerTestResult<T, 3> result =
-        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
+    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
+
+    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType);
- LayerTestResult<T, 3> result =
- Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
+ LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
- LayerTestResult<T, 3> result =
- Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
+ LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2TestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
23.0f, 24.0f
}));
- armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
7.0f, 8.0f,
11.0f, 12.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
25.0f, 26.0f,
35.0f, 36.0f
}));
- armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
std::vector<T> output;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
23.0f, 24.0f
}));
- armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
7.0f, 8.0f,
15.0f, 16.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType);
auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
17.0f, 18.0f,
31.0f, 32.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
std::vector<T> output;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
23.0f, 24.0f
}));
- armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType);
auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
7.0f,
29.0f
}));
- armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType);
auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
13.0f, 14.0f, 50.0f,
35.0f, 36.0f, 55.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
std::vector<T> output;
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0,
- true, qScale, qOffset);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
3.0f, 4.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1,
- true, qScale, qOffset);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
3.0f, 4.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2,
- true, qScale, qOffset);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
3.0f, 4.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset,
bool useSubtensor)
{
- armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 3,
- useSubtensor, qScale, qOffset);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation4dDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset)
{
unsigned int dimension = 0;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset)
{
unsigned int dimension = 1;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset)
{
unsigned int dimension = 2;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f,
27.0f, 28.0f
}));
- armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
unsigned int dimension = 3;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f, 13.0f,
26.0f, 27.0f, 28.0f
}));
- armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation4dDiffShapeDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
}
LayerTestResult<float, 4> ResizeBilinearNopTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 255.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f,
} // anonymous namespace
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Pad2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::TensorShape inputShape{ 3, 3 };
const armnn::TensorShape outputShape{ 7, 7 };
- const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
- const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
std::vector<T> inputValues(
QuantizedVector<T>(qScale, qOffset,
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Pad3dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::TensorShape inputShape{ 2, 2, 2 };
const armnn::TensorShape outputShape{ 3, 5, 6 };
- const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
- const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
std::vector<T> inputValues(
QuantizedVector<T>(qScale,qOffset,
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Pad4dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
- const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
- const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
std::vector<T> inputValues(
QuantizedVector<T>(qScale,qOffset,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 3> PadUint83dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad3dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> PadUint84dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad4dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 2> PadFloat322dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<float, 3> PadFloat323dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad3dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<float, 4> PadFloat324dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad4dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<float, 4> L2Normalization1dTest(
inputValues, expectedOutputValues, layout);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
constexpr unsigned int outputChannels = inputChannels;
constexpr unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> ConstantTestUint8(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 3> MergerUint8Test(
2.f, 4.f
};
- return BatchNormTestImpl<float>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 0.f, 0, armnn::DataLayout::NCHW);
+ return BatchNormTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 0.f, 0, armnn::DataLayout::NCHW);
}
LayerTestResult<float, 4> BatchNormNhwcTest(
6.f, 4.f
};
- return BatchNormTestImpl<float>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 0.f, 0, armnn::DataLayout::NHWC);
+ return BatchNormTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 0.f, 0, armnn::DataLayout::NHWC);
}
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
2.f, 4.f
};
- return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 1.f/20.f, 50, armnn::DataLayout::NCHW);
+ return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 1.f/20.f, 50, armnn::DataLayout::NCHW);
}
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
6.f, 4.f
};
- return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 1.f/20.f, 50, armnn::DataLayout::NHWC);
+ return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 1.f/20.f, 50, armnn::DataLayout::NHWC);
}
LayerTestResult<uint8_t, 4> ConstantUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 2e-6f, 1);
+ return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
}
LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
{
- return Concatenation4dDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation4dDiffShapeDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, forceNoPadding);
}
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, forceNoPadding);
}
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> SimpleAveragePooling2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<uint8_t>(
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, dataLayout, 0.5, -1);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(
+ return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, forceNoPadding);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 0.5, -1);
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5, -1);
}
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize7Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize9Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> ComparePooling2dTest(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<float>(
+ return ComparePooling2dTestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<uint8_t>(
+ return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool transposeWeights)
{
- return FullyConnectedLargeTestCommon<float>(workloadFactory, memoryManager, transposeWeights);
+ return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 1.0f, -5);
}
LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 1.0f, -5);
}
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
+ workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SimplePermuteFloat32Test(
// 4, 5, 6
// 7, 8, 9
- armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
- armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());
+ armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
+ armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
{1, 2, 3,
// 12, 16
// 24, 28
- armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
- armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
+ armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
+ armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
{12, 16,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
namespace {
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DTest<float>(workloadFactory, memoryManager);
+ return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DReverseTest<float>(workloadFactory, memoryManager);
+ return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<float>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<float>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<float>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 3> StridedSlice3DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DTest<float>(workloadFactory, memoryManager);
+ return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DReverseTest<float>(workloadFactory, memoryManager);
+ return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> StridedSlice2DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DTest<float>(workloadFactory, memoryManager);
+ return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DReverseTest<float>(workloadFactory, memoryManager);
+ return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DReverseTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DReverseTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DReverseTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> Debug4DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4DTest<float>(workloadFactory, memoryManager);
+ return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 3> Debug3DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3DTest<float>(workloadFactory, memoryManager);
+ return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> Debug2DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2DTest<float>(workloadFactory, memoryManager);
+ return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 1> Debug1DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1DTest<float>(workloadFactory, memoryManager);
+ return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> Debug4DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> Debug3DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> Debug2DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 1> Debug1DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> PreCompiledConvolution2dTest(