IVGCVSW-4246 Clean build of LayerTests with -Wextra
author: Derek Lamberti <derek.lamberti@arm.com>
Tue, 10 Dec 2019 21:57:35 +0000 (21:57 +0000)
committer: Jim Flynn Arm <jim.flynn@arm.com>
Tue, 17 Dec 2019 19:56:24 +0000 (19:56 +0000)
Change-Id: If3d98e45a6e2fa8e1afd19a4052334335feacf63
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
45 files changed:
src/backends/backendsCommon/test/layerTests/AbsTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
src/backends/backendsCommon/test/layerTests/RsqrtTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp

index 31d102c..ff76a38 100644 (file)
@@ -25,6 +25,7 @@ LayerTestResult<T, 2> Abs2dTestCommon(
     const std::vector<float>& inputValues,
     const std::vector<float>& expectedOutputValues)
 {
+    boost::ignore_unused(memoryManager);
     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
 
     LayerTestResult<T, 2> result(outputTensorInfo);
@@ -108,6 +109,8 @@ LayerTestResult<T, 3> Abs3dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     const armnn::TensorShape inputShape{ 3, 1, 2 };
     const armnn::TensorShape outputShape{ 3, 1, 2 };
 
index a45c6d5..2f2d8db 100644 (file)
@@ -37,6 +37,7 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
     unsigned int inputChannels,
     unsigned int inputBatchSize)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int outputWidth = inputWidth;
     unsigned int outputHeight = inputHeight;
     unsigned int outputChannels = inputChannels;
@@ -245,6 +246,7 @@ boost::multi_array<float, 4> BoundedReLuRandomInputTest(
     float upperBound,
     const armnn::ActivationDescriptor& activationDescriptor)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
     const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
 
@@ -309,6 +311,7 @@ LayerTestResult<T,4> ConstantLinearActivationTestCommon(
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int inputHeight    = 20;
     unsigned int inputWidth     = 17;
     unsigned int inputChannels  = 3;
@@ -400,6 +403,7 @@ LayerTestResult<T, 4> SimpleActivationTest(
     int32_t outOffset,
     const std::vector<float>& outputExpectedData)
 {
+    boost::ignore_unused(memoryManager);
     constexpr static unsigned int inputWidth = 16u;
     constexpr static unsigned int inputHeight = 1u;
     constexpr static unsigned int inputChannels = 1u;
@@ -790,6 +794,7 @@ LayerTestResult<float, 5> SqrtNNTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     const int inputDataSize = 120;
     std::vector<float> inputData(inputDataSize);
 
@@ -1023,6 +1028,7 @@ LayerTestResult<T,4> CompareActivationTestImpl(
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int width     = 17;
     unsigned int height    = 29;
     unsigned int channels  = 2;
index 247821b..a3a21ab 100644 (file)
@@ -165,6 +165,7 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
@@ -247,6 +248,7 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
@@ -453,6 +455,8 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     // Create Initial Tensor
     // 1, 2, 3
     // 4, 5, 6
@@ -559,6 +563,7 @@ LayerTestResult<float,4> CompareAdditionTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int batchSize = 4;
     unsigned int channels  = 1;
     unsigned int height    = 2;
index 18c9e54..2733100 100644 (file)
@@ -27,6 +27,7 @@ LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
         const std::vector<int32_t>& outputData,
         int axis = 3)
 {
+    boost::ignore_unused(memoryManager);
     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
 
     LayerTestResult<int32_t, 3> result(outputTensorInfo);
index 8471456..7857b35 100644 (file)
@@ -37,6 +37,7 @@ LayerTestResult<T, 4> BatchNormTestImpl(
     int32_t qOffset,
     armnn::DataLayout dataLayout)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);
 
@@ -115,6 +116,8 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
+
     const unsigned int width    = 2;
     const unsigned int height   = 3;
     const unsigned int channels = 2;
@@ -587,6 +590,7 @@ LayerTestResult<float,4> CompareBatchNormTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int width     = 2;
     const unsigned int height    = 3;
     const unsigned int channels  = 5;
index 79462d1..a5016ff 100644 (file)
@@ -40,6 +40,8 @@ LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
         float scale = 1.0f,
         int32_t offset = 0)
 {
+    boost::ignore_unused(memoryManager);
+
     armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
 
index 1c54b85..c8272f4 100644 (file)
@@ -44,6 +44,7 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
     float outQuantScale,
     int outQuantOffset)
 {
+    boost::ignore_unused(memoryManager);
     BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
 
index 53bfc20..57ed754 100644 (file)
@@ -127,6 +127,7 @@ template<typename T> void PermuteTensorData(
     const T * inputData,
     std::vector<T>& outputData)
 {
+    boost::ignore_unused(memoryManager);
     BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
     if (inputData == nullptr)
     {
@@ -178,6 +179,7 @@ template<typename T> void PermuteInputsForConcat(
     unsigned int & concatDim,
     TensorInfo & outputTensorInfo)
 {
+    boost::ignore_unused(memoryManager);
     BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
         "Expecting more than one tensor to be concatenated here");
 
@@ -1917,6 +1919,8 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
+    boost::ignore_unused(memoryManager);
+
     // Defines the tensor descriptors.
     TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
     TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
@@ -2070,6 +2074,8 @@ LayerTestResult<float,3> ConcatTest(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
     unsigned int outputChannels = 3;
@@ -2341,6 +2347,8 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
     unsigned int outputChannels = 3;
@@ -2484,6 +2492,8 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
     IWorkloadFactory& workloadFactory,
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
     unsigned int outputChannels = 3;
@@ -2620,6 +2630,8 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
         IWorkloadFactory& workloadFactory,
         const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
     unsigned int outputChannels = 3;
index 1790819..cfb6263 100644 (file)
@@ -29,6 +29,7 @@ LayerTestResult<T, 4> ConstantTestImpl(
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     constexpr unsigned int inputWidth = 3;
     constexpr unsigned int inputHeight = 4;
     constexpr unsigned int inputChannels = 3;
index e825fff..a00fda7 100644 (file)
@@ -218,6 +218,7 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
     uint32_t dilationX = 1,
     uint32_t dilationY = 1)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int inputHeight   = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
     unsigned int inputWidth    = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
     unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
@@ -381,6 +382,7 @@ LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(
     uint32_t strideX  = 1,
     uint32_t strideY  = 1)
 {
+    boost::ignore_unused(qScale, qOffset);
     unsigned int inputNum       = boost::numeric_cast<unsigned int>(input.shape()[0]);
     unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[3]);
     unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[1]);
@@ -586,6 +588,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
     bool biasEnabled,
     armnn::DataLayout dataLayout)
 {
+    boost::ignore_unused(biasEnabled);
     // Use common single-batch 5x5 image.
 
     armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
@@ -638,6 +641,8 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
         bool biasEnabled,
         const armnn::DataLayout& dataLayout)
 {
+    boost::ignore_unused(biasEnabled);
+
     // Input is a single-batch, 1 channel, 5x5 image.
     armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
     boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
index 61eb03a..5fe644e 100644 (file)
@@ -18,6 +18,7 @@ LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     using namespace half_float::literal;
 
     const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
index e5184e0..f758f4b 100644 (file)
@@ -16,6 +16,7 @@ LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     using namespace half_float::literal;
 
     const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
index 023bbae..a4db568 100644 (file)
@@ -31,6 +31,7 @@ LayerTestResult<T, Dim> DebugTestImpl(
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     if(armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(qScale);
index 4e8c938..5e5cba3 100644 (file)
@@ -30,6 +30,7 @@ LayerTestResult<T, 4> DepthToSpaceTestImpl(
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
     {
         PermuteTensorNhwcToNchw<float>(inputInfo, inputData);
index 844b109..fb225ae 100644 (file)
@@ -27,6 +27,7 @@ LayerTestResult<T1, Dim> DequantizeTestImpl(
         const std::vector<T1>& expectedOutputData,
         armnn::DequantizeQueueDescriptor descriptor)
 {
+    boost::ignore_unused(memoryManager);
     boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
 
     LayerTestResult<T1, Dim> ret(outputTensorInfo);
index 9a110a3..b908f96 100644 (file)
@@ -20,6 +20,7 @@ LayerTestResult<float, 4> DivisionByZeroTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int width        = 2u;
     const unsigned int height       = 2u;
     const unsigned int channelCount = 2u;
index 1ce9d2d..d25673b 100644 (file)
@@ -18,6 +18,7 @@ LayerTestResult<float, 2> FakeQuantizationTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     constexpr unsigned int width = 2;
     constexpr unsigned int height = 3;
 
index 40ed8a2..a3d29da 100644 (file)
@@ -16,6 +16,7 @@ LayerTestResult<T, 4> SimpleFloorTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType);
     inputTensorInfo.SetQuantizationScale(0.1f);
 
index cf101ee..a3fe858 100644 (file)
@@ -35,6 +35,7 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
         bool biasEnabled,
         bool transposeWeights)
 {
+    boost::ignore_unused(memoryManager);
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
index 5e38e48..1ccf51c 100644 (file)
@@ -32,6 +32,7 @@ LayerTestResult<T, OutputDim> GatherTestImpl(
     const std::vector<int32_t>& indicesData,
     const std::vector<T>& outputData)
 {
+    boost::ignore_unused(memoryManager);
     auto params  = MakeTensor<T, ParamsDim>(paramsInfo, paramsData);
     auto indices = MakeTensor<int32_t, IndicesDim>(indicesInfo, indicesData);
 
index c734a2d..468a34d 100644 (file)
@@ -35,6 +35,7 @@ LayerTestResult<T, 4> InstanceNormTestImpl(
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                         armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
 
index d67f7b6..4b16921 100644 (file)
@@ -33,6 +33,7 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
     const armnn::DataLayout layout,
     float epsilon = 1e-12f)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
 
index 3a2f39a..dab8e49 100644 (file)
@@ -37,6 +37,7 @@ LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     LayerTestResult<T, NumDims> result(outputInfo);
     result.outputExpected =
         MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
index 6cea777..e755aa5 100644 (file)
@@ -143,6 +143,7 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
         int32_t qOffset = 0,
         armnn::DataType constantDataType = armnn::DataType::Float32)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
     unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
     unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
@@ -345,6 +346,7 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
                                                   int32_t qOffset = 0,
                                                   armnn::DataType constantDataType = armnn::DataType::Float32)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int batchSize = 2;
     unsigned int outputSize = 16;
     unsigned int inputSize = 5;
@@ -1059,6 +1061,7 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
         int32_t qOffset = 0,
         armnn::DataType constantDataType = armnn::DataType::Float32)
 {
+    boost::ignore_unused(memoryManager);
     bool cifgEnabled = true;
     bool peepholeEnabled = true;
     bool projectionEnabled = false;
@@ -1283,6 +1286,7 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
                                                   int32_t qOffset = 0,
                                                   armnn::DataType constantDataType = armnn::DataType::Float32)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int batchSize = 2;
     unsigned int outputSize = 3;
     unsigned int inputSize = 5;
@@ -1549,6 +1553,7 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
     const boost::multi_array<uint8_t, 2>& input,
     const boost::multi_array<uint8_t, 2>& outputExpected)
 {
+    boost::ignore_unused(memoryManager);
     auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
     auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
     auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
index 07e2bef..0218697 100644 (file)
@@ -19,6 +19,7 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
 LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int width        = 2u;
     const unsigned int height       = 2u;
     const unsigned int channelCount = 2u;
index bab7337..b8eae1c 100644 (file)
@@ -28,6 +28,8 @@ LayerTestResult<T, OutputDim> MeanTestHelper(
         float scale = 1.0f,
         int32_t offset = 0)
 {
+    boost::ignore_unused(memoryManager);
+
     armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
 
index bf66950..ed12c7f 100644 (file)
@@ -20,6 +20,7 @@ LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int shape0[] = { 1, 2, 2, 2 };
     unsigned int shape1[] = { 1, 1, 1, 1 };
 
index 99b1b18..a39e6bd 100644 (file)
@@ -401,6 +401,7 @@ LayerTestResult<float,4> CompareMultiplicationTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     armnn::IWorkloadFactory& refWorkloadFactory)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int width = 16;
     const unsigned int height = 32;
     const unsigned int channelCount = 2;
index f65960f..ef82855 100644 (file)
@@ -24,6 +24,7 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
     armnn::NormalizationAlgorithmChannel normChannel,
     armnn::NormalizationAlgorithmMethod normMethod)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int inputHeight = 2;
     const unsigned int inputWidth = 2;
     const unsigned int inputChannels = 1;
index 0f9a30e..3a8d2b7 100644 (file)
@@ -24,6 +24,7 @@ LayerTestResult<T, 2> Pad2dTestCommon(
     int32_t qOffset,
     const float customPaddingValue)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorShape inputShape{ 3, 3 };
     const armnn::TensorShape outputShape{ 7, 7 };
 
@@ -95,6 +96,7 @@ LayerTestResult<T, 3> Pad3dTestCommon(
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorShape inputShape{ 2, 2, 2 };
     const armnn::TensorShape outputShape{ 3, 5, 6 };
 
@@ -178,6 +180,7 @@ LayerTestResult<T, 4> Pad4dTestCommon(
     float qScale,
     int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
     const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
 
index c5039a1..83746f3 100644 (file)
@@ -26,6 +26,7 @@ LayerTestResult<T, 4> SimplePermuteTestImpl(
         const std::vector<T>& inputData,
         const std::vector<T>& outputExpectedData)
 {
+    boost::ignore_unused(memoryManager);
     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
index 2012dfd..160e658 100644 (file)
@@ -38,6 +38,7 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
     const boost::multi_array<T, 4>& input,
     const boost::multi_array<T, 4>& outputExpected)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
     const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
     auto heightIndex = dimensionIndices.GetHeightIndex();
@@ -739,6 +740,7 @@ LayerTestResult<T, 4> ComparePooling2dTestCommon(
     float qScale = 1.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     const unsigned int inputWidth = 16;
     const unsigned int inputHeight = 32;
     const unsigned int channelCount = 2;
index 99f3cd1..2021349 100644 (file)
@@ -25,6 +25,8 @@ LayerTestResult<T, 4> PreluTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
+
     armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType);
     armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType);
     armnn::TensorInfo outputTensorInfo({ 1, 2, 2, 3 }, ArmnnType);
index 481f681..e23f92a 100644 (file)
@@ -30,6 +30,7 @@ LayerTestResult<T, Dim> QuantizeTestImpl(
     const std::vector<T>& expectedOutputData,
     armnn::QuantizeQueueDescriptor descriptor)
 {
+    boost::ignore_unused(memoryManager);
     boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);
 
     LayerTestResult<T, Dim> ret(outputTensorInfo);
index bce24f0..485e7ea 100644 (file)
@@ -23,6 +23,7 @@ LayerTestResult<T, NumDims> SimpleReshapeTestImpl(
     const std::vector<T>& inputData,
     const std::vector<T>& outputExpectedData)
 {
+    boost::ignore_unused(memoryManager);
     auto input = MakeTensor<T, NumDims>(inputTensorInfo, inputData);
 
     LayerTestResult<T, NumDims> ret(outputTensorInfo);
index 198c60a..080155e 100644 (file)
@@ -74,6 +74,7 @@ LayerTestResult<T, NumDims> ResizeTestImpl(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const ResizeTestParams& params)
 {
+    boost::ignore_unused(memoryManager);
     armnn::TensorInfo inputInfo(params.m_InputShape, ArmnnType);
     armnn::TensorInfo outputInfo(params.m_OutputShape, ArmnnType);
 
index 3adb797..24a3b21 100644 (file)
@@ -25,6 +25,7 @@ LayerTestResult<T, 2> Rsqrt2dTestCommon(
     const std::vector<float>& inputValues,
     const std::vector<float>& expectedOutputValues)
 {
+    boost::ignore_unused(memoryManager);
     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputValues,inputTensorInfo));
 
     LayerTestResult<T, 2> result(outputTensorInfo);
@@ -97,6 +98,7 @@ LayerTestResult<T, 3> Rsqrt3dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorShape inputShape{ 3, 1, 2 };
     const armnn::TensorShape outputShape{ 3, 1, 2 };
 
index a60b189..65b1716 100644 (file)
@@ -30,6 +30,7 @@ LayerTestResult<T, NumDims> SliceTestImpl(
     const float qScale = 1.0f,
     const int qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     if(armnn::IsQuantizedType<T>())
     {
         inputInfo.SetQuantizationScale(qScale);
index a5f6477..2a1aa76 100644 (file)
@@ -66,6 +66,7 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
     const std::vector<float>& inputData,
     int axis = 1)
 {
+    boost::ignore_unused(memoryManager);
     using std::exp;
 
     const float qScale = 1.f / 256.f;
index ea2130d..2793875 100644 (file)
@@ -32,6 +32,7 @@ LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::PermutationVector NCHWToNHWC = {0, 3, 1, 2};
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NHWC)
     {
index c4b11a7..b6bf530 100644 (file)
@@ -32,6 +32,7 @@ LayerTestResult<T, 4> SpaceToDepthTestImpl(
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::PermutationVector NHWCToNCHW = {0, 2, 3, 1};
 
     if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
index 7aebdd0..c8c2f9c 100644 (file)
@@ -25,6 +25,7 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int inputWidth = 5;
     unsigned int inputHeight = 6;
     unsigned int inputChannels = 3;
@@ -257,6 +258,7 @@ LayerTestResult<T, 3> CopyViaSplitterTestImpl(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale, int32_t qOffset)
 {
+    boost::ignore_unused(memoryManager);
     const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
     auto input = MakeTensor<T, 3>(
         tensorInfo,
index db9038e..1bf4967 100644 (file)
@@ -31,6 +31,7 @@ LayerTestResult<T, outputDimLength> StackTestHelper(
         const std::vector<std::vector<T>>& inputData,
         const std::vector<T>& outputExpectedData)
 {
+    boost::ignore_unused(memoryManager);
     unsigned int numInputs = static_cast<unsigned int>(inputData.size());
     std::vector<boost::multi_array<T, outputDimLength-1>> inputs;
     for (unsigned int i = 0; i < numInputs; ++i)
index 8082be4..23f5df0 100644 (file)
@@ -30,6 +30,7 @@ LayerTestResult<T, OutDim> StridedSliceTestImpl(
     const float qScale = 1.0f,
     const int32_t qOffset = 0)
 {
+    boost::ignore_unused(memoryManager);
     if(armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(qScale);
index 71b08aa..4b4894f 100644 (file)
@@ -52,6 +52,7 @@ void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                     const TensorData<T>& weights,
                                     const armnn::Optional<TensorData<BT>>& biases)
 {
+    boost::ignore_unused(memoryManager);
     using namespace armnn;
 
     VerifyInputTensorData(input, "input");