const std::vector<float>& inputValues,
const std::vector<float>& expectedOutputValues)
{
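+ // boost::ignore_unused (from boost/core/ignore_unused.hpp) expands to nothing; it only
+ // silences unused-parameter warnings. memoryManager stays in the signature so that every
+ // layer test keeps the same interface.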
+ boost::ignore_unused(memoryManager);
auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues, inputTensorInfo));
LayerTestResult<T, 2> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
+
const armnn::TensorShape inputShape{ 3, 1, 2 };
const armnn::TensorShape outputShape{ 3, 1, 2 };
unsigned int inputChannels,
unsigned int inputBatchSize)
{
+ boost::ignore_unused(memoryManager);
unsigned int outputWidth = inputWidth;
unsigned int outputHeight = inputHeight;
unsigned int outputChannels = inputChannels;
float upperBound,
const armnn::ActivationDescriptor& activationDescriptor)
{
+ boost::ignore_unused(memoryManager);
const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
float qScale = 0.0f,
int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
unsigned int inputHeight = 20;
unsigned int inputWidth = 17;
unsigned int inputChannels = 3;
int32_t outOffset,
const std::vector<float>& outputExpectedData)
{
+ boost::ignore_unused(memoryManager);
constexpr static unsigned int inputWidth = 16u;
constexpr static unsigned int inputHeight = 1u;
constexpr static unsigned int inputChannels = 1u;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
const int inputDataSize = 120;
std::vector<float> inputData(inputDataSize);
float qScale = 0.0f,
int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
unsigned int width = 17;
unsigned int height = 29;
unsigned int channels = 2;
float qScale,
int32_t qOffset)
{
+ boost::ignore_unused(memoryManager);
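+ // Elementwise broadcasting: shapes {1,3,2,1} and {1,1,2,3} combine to give {1,3,2,3}.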
armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
float qScale,
int32_t qOffset)
{
+ boost::ignore_unused(memoryManager);
armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
+
// Create the initial tensor:
// 1, 2, 3
// 4, 5, 6
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
+ boost::ignore_unused(memoryManager);
unsigned int batchSize = 4;
unsigned int channels = 1;
unsigned int height = 2;
const std::vector<int32_t>& outputData,
int axis = 3)
{
+ boost::ignore_unused(memoryManager);
auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
LayerTestResult<int32_t, 3> result(outputTensorInfo);
int32_t qOffset,
armnn::DataLayout dataLayout)
{
+ boost::ignore_unused(memoryManager);
armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);
float qScale,
int32_t qOffset)
{
+ boost::ignore_unused(memoryManager);
+
const unsigned int width = 2;
const unsigned int height = 3;
const unsigned int channels = 2;
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
+ boost::ignore_unused(memoryManager);
const unsigned int width = 2;
const unsigned int height = 3;
const unsigned int channels = 5;
float scale = 1.0f,
int32_t offset = 0)
{
+ boost::ignore_unused(memoryManager);
+
armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
float outQuantScale,
int outQuantOffset)
{
+ boost::ignore_unused(memoryManager);
BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
const T * inputData,
std::vector<T>& outputData)
{
+ boost::ignore_unused(memoryManager);
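+ // BOOST_ASSERT_MSG typically compiles away in release (NDEBUG) builds; the explicit
+ // null check below keeps the guard in place for all build types.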
BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
if (inputData == nullptr)
{
unsigned int & concatDim,
TensorInfo & outputTensorInfo)
{
+ boost::ignore_unused(memoryManager);
BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
"Expecting more than one tensor to be concatenated here");
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
+ boost::ignore_unused(memoryManager);
+
// Defines the tensor descriptors.
TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
+
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
unsigned int outputChannels = 3;
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
+
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
unsigned int outputChannels = 3;
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
+
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
unsigned int outputChannels = 3;
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
+
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
unsigned int outputChannels = 3;
float qScale,
int32_t qOffset)
{
+ boost::ignore_unused(memoryManager);
constexpr unsigned int inputWidth = 3;
constexpr unsigned int inputHeight = 4;
constexpr unsigned int inputChannels = 3;
uint32_t dilationX = 1,
uint32_t dilationY = 1)
{
+ boost::ignore_unused(memoryManager);
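+ // boost::numeric_cast throws on a lossy conversion instead of silently truncating.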
unsigned int inputHeight = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
unsigned int inputWidth = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
uint32_t strideX = 1,
uint32_t strideY = 1)
{
+ boost::ignore_unused(qScale, qOffset);
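+ // Here it is the quantization parameters that go unused, not the memory manager.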
unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]);
bool biasEnabled,
armnn::DataLayout dataLayout)
{
+ boost::ignore_unused(biasEnabled);
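+ // Only the biasEnabled flag goes unused in this variant.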
// Use a common single-batch input image.
armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
bool biasEnabled,
const armnn::DataLayout& dataLayout)
{
+ boost::ignore_unused(biasEnabled);
+
// Input is a single-batch, 1 channel, 5x5 image.
armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
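+ // half_float::literal supplies the _h suffix for writing half-precision constants.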
using namespace half_float::literal;
const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
using namespace half_float::literal;
const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
if(armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(qScale);
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
{
PermuteTensorNhwcToNchw<float>(inputInfo, inputData);
const std::vector<T1>& expectedOutputData,
armnn::DequantizeQueueDescriptor descriptor)
{
+ boost::ignore_unused(memoryManager);
boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
LayerTestResult<T1, Dim> ret(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
const unsigned int width = 2u;
const unsigned int height = 2u;
const unsigned int channelCount = 2u;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
constexpr unsigned int width = 2;
constexpr unsigned int height = 3;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType);
inputTensorInfo.SetQuantizationScale(0.1f);
bool biasEnabled,
bool transposeWeights)
{
+ boost::ignore_unused(memoryManager);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
const std::vector<int32_t>& indicesData,
const std::vector<T>& outputData)
{
+ boost::ignore_unused(memoryManager);
auto params = MakeTensor<T, ParamsDim>(paramsInfo, paramsData);
auto indices = MakeTensor<int32_t, IndicesDim>(indicesInfo, indicesData);
float qScale = 0.0f,
int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
const armnn::DataLayout layout,
float epsilon = 1e-12f)
{
+ boost::ignore_unused(memoryManager);
const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
float qScale = 1.0f,
int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
LayerTestResult<T, NumDims> result(outputInfo);
result.outputExpected =
MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
+ boost::ignore_unused(memoryManager);
unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
+ boost::ignore_unused(memoryManager);
unsigned int batchSize = 2;
unsigned int outputSize = 16;
unsigned int inputSize = 5;
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
+ boost::ignore_unused(memoryManager);
bool cifgEnabled = true;
bool peepholeEnabled = true;
bool projectionEnabled = false;
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
+ boost::ignore_unused(memoryManager);
unsigned int batchSize = 2;
unsigned int outputSize = 3;
unsigned int inputSize = 5;
const boost::multi_array<uint8_t, 2>& input,
const boost::multi_array<uint8_t, 2>& outputExpected)
{
+ boost::ignore_unused(memoryManager);
auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
const unsigned int width = 2u;
const unsigned int height = 2u;
const unsigned int channelCount = 2u;
float scale = 1.0f,
int32_t offset = 0)
{
+ boost::ignore_unused(memoryManager);
+
armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
unsigned int shape0[] = { 1, 2, 2, 2 };
unsigned int shape1[] = { 1, 1, 1, 1 };
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
+ boost::ignore_unused(memoryManager);
const unsigned int width = 16;
const unsigned int height = 32;
const unsigned int channelCount = 2;
armnn::NormalizationAlgorithmChannel normChannel,
armnn::NormalizationAlgorithmMethod normMethod)
{
+ boost::ignore_unused(memoryManager);
const unsigned int inputHeight = 2;
const unsigned int inputWidth = 2;
const unsigned int inputChannels = 1;
int32_t qOffset,
const float customPaddingValue)
{
+ boost::ignore_unused(memoryManager);
const armnn::TensorShape inputShape{ 3, 3 };
const armnn::TensorShape outputShape{ 7, 7 };
float qScale,
int32_t qOffset)
{
+ boost::ignore_unused(memoryManager);
const armnn::TensorShape inputShape{ 2, 2, 2 };
const armnn::TensorShape outputShape{ 3, 5, 6 };
float qScale,
int32_t qOffset)
{
+ boost::ignore_unused(memoryManager);
const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
const std::vector<T>& inputData,
const std::vector<T>& outputExpectedData)
{
+ boost::ignore_unused(memoryManager);
auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
LayerTestResult<T, 4> ret(outputTensorInfo);
const boost::multi_array<T, 4>& input,
const boost::multi_array<T, 4>& outputExpected)
{
+ boost::ignore_unused(memoryManager);
const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
auto heightIndex = dimensionIndices.GetHeightIndex();
float qScale = 1.0f,
int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
const unsigned int inputWidth = 16;
const unsigned int inputHeight = 32;
const unsigned int channelCount = 2;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
+
armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType);
armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType);
armnn::TensorInfo outputTensorInfo({ 1, 2, 2, 3 }, ArmnnType);
const std::vector<T>& expectedOutputData,
armnn::QuantizeQueueDescriptor descriptor)
{
+ boost::ignore_unused(memoryManager);
boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);
LayerTestResult<T, Dim> ret(outputTensorInfo);
const std::vector<T>& inputData,
const std::vector<T>& outputExpectedData)
{
+ boost::ignore_unused(memoryManager);
auto input = MakeTensor<T, NumDims>(inputTensorInfo, inputData);
LayerTestResult<T, NumDims> ret(outputTensorInfo);
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const ResizeTestParams& params)
{
+ boost::ignore_unused(memoryManager);
armnn::TensorInfo inputInfo(params.m_InputShape, ArmnnType);
armnn::TensorInfo outputInfo(params.m_OutputShape, ArmnnType);
const std::vector<float>& inputValues,
const std::vector<float>& expectedOutputValues)
{
+ boost::ignore_unused(memoryManager);
auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues, inputTensorInfo));
LayerTestResult<T, 2> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
+ boost::ignore_unused(memoryManager);
const armnn::TensorShape inputShape{ 3, 1, 2 };
const armnn::TensorShape outputShape{ 3, 1, 2 };
const float qScale = 1.0f,
const int qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
if(armnn::IsQuantizedType<T>())
{
inputInfo.SetQuantizationScale(qScale);
const std::vector<float>& inputData,
int axis = 1)
{
+ boost::ignore_unused(memoryManager);
using std::exp;
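+ // A 1/256 scale lets softmax outputs, which lie in [0, 1], span the full 8-bit quantized range.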
const float qScale = 1.f / 256.f;
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
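+ // ArmNN permutation vectors are destination mappings: source dimension i moves to index
+ // NCHWToNHWC[i], so {0, 3, 1, 2} sends the channel dimension to the innermost NHWC position.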
const armnn::PermutationVector NCHWToNHWC = {0, 3, 1, 2};
if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NHWC)
{
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
const armnn::PermutationVector NHWCToNCHW = {0, 2, 3, 1};
if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
float qScale = 0.0f,
int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
unsigned int inputWidth = 5;
unsigned int inputHeight = 6;
unsigned int inputChannels = 3;
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale, int32_t qOffset)
{
+ boost::ignore_unused(memoryManager);
const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
auto input = MakeTensor<T, 3>(
tensorInfo,
const std::vector<std::vector<T>>& inputData,
const std::vector<T>& outputExpectedData)
{
+ boost::ignore_unused(memoryManager);
unsigned int numInputs = static_cast<unsigned int>(inputData.size());
std::vector<boost::multi_array<T, outputDimLength-1>> inputs;
for (unsigned int i = 0; i < numInputs; ++i)
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
+ boost::ignore_unused(memoryManager);
if(armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(qScale);
const TensorData<T>& weights,
const armnn::Optional<TensorData<BT>>& biases)
{
+ boost::ignore_unused(memoryManager);
using namespace armnn;
VerifyInputTensorData(input, "input");