2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
6 #include "Pooling2dTestImpl.hpp"
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
11 #include <armnn/LayerSupport.hpp>
13 #include <armnnUtils/TensorUtils.hpp>
14 #include <armnnUtils/DataLayoutIndexed.hpp>
15 #include <armnnUtils/Permute.hpp>
17 #include <backendsCommon/WorkloadInfo.hpp>
19 #include <backendsCommon/test/TensorCopyUtils.hpp>
20 #include <backendsCommon/test/WorkloadTestUtils.hpp>
22 #include <test/TensorHelpers.hpp>
24 #include <boost/numeric/conversion/cast.hpp>
29 using namespace armnnUtils;
// Shared driver used by every pooling test in this file: builds a Pooling2d
// workload from 'descriptor', feeds it 'input', and returns the computed
// output alongside 'outputExpected' so the caller can compare them.
// NOTE(review): qScale/qOffset are referenced in the body but their parameter
// declarations are not visible in this chunk - confirm the full signature
// (presumably 'float qScale, int32_t qOffset' between lines 35 and 38).
31 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
32 LayerTestResult<T, 4> SimplePooling2dTestImpl(
33 armnn::IWorkloadFactory& workloadFactory,
34 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
35 armnn::Pooling2dDescriptor descriptor,
38 const boost::multi_array<T, 4>& input,
39 const boost::multi_array<T, 4>& outputExpected)
41 boost::ignore_unused(memoryManager);
// Resolve where H/W/C live in the 4D shape for the requested layout
// (NCHW vs NHWC), so the same code path handles both.
42 const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
43 const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
44 auto heightIndex = dimensionIndices.GetHeightIndex();
45 auto widthIndex = dimensionIndices.GetWidthIndex();
46 auto channelsIndex = dimensionIndices.GetChannelsIndex();
// Derive tensor dimensions from the supplied multi_arrays rather than
// trusting the caller to pass matching metadata.
48 unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
49 unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
50 unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
51 unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
53 unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
54 unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
55 unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
56 unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
58 armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
59 inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
61 armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
62 outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
64 // Set quantization parameters if the requested type is a quantized type.
65 if(armnn::IsQuantizedType<T>())
67 inputTensorInfo.SetQuantizationScale(qScale);
68 inputTensorInfo.SetQuantizationOffset(qOffset);
69 outputTensorInfo.SetQuantizationScale(qScale);
70 outputTensorInfo.SetQuantizationOffset(qOffset);
73 LayerTestResult<T, 4> result(outputTensorInfo);
75 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
76 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
78 armnn::Pooling2dQueueDescriptor queueDescriptor;
79 queueDescriptor.m_Parameters = descriptor;
80 queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
82 armnn::WorkloadInfo workloadInfo;
83 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
84 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
86 // Don't execute if Pooling is not supported, as an exception will be raised.
87 armnn::BackendId backend = workloadFactory.GetBackendId();
88 const size_t reasonIfUnsupportedMaxLen = 255;
89 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
90 result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
91 queueDescriptor.m_Parameters,
92 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
93 if (!result.supported)
98 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
100 inputHandle->Allocate();
101 outputHandle->Allocate();
103 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
// NOTE(review): the workload->Execute() call expected between the copy-in and
// copy-out is not visible in this chunk - confirm it exists in the full file.
107 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
109 result.outputExpected = outputExpected;
115 // Tests max pooling with the following parameters:
// 3x3 pool, stride 2x4, optional 3-wide left/right padding, on a 2-batch,
// 2-channel 13x8 input where channel 1 is the negation of channel 0.
123 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
124 LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
125 armnn::IWorkloadFactory& workloadFactory,
126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
// NOTE(review): forceNoPadding/qScale/qOffset parameters are used below but
// not visible in this chunk - confirm the full signature.
131 armnn::Pooling2dDescriptor descriptor;
132 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
133 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
134 descriptor.m_StrideX = 2;
135 descriptor.m_StrideY = 4;
136 // forceNoPadding is mainly used for compatibility with ARM Compute.
137 // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
138 descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
139 descriptor.m_PadTop = descriptor.m_PadBottom = 0;
140 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
141 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
// Output size follows the standard floor formula; padding is folded in, so
// the expected data below differs between the padded and unpadded cases.
143 unsigned int inputWidth = 8;
144 unsigned int inputHeight = 13;
145 unsigned int outputWidth =
146 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
147 descriptor.m_StrideX;
148 unsigned int outputHeight =
149 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
150 descriptor.m_StrideY;
151 unsigned int channels = 2;
152 unsigned int batchSize = 2;
154 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
155 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
157 // Set quantization parameters if the requested type is a quantized type.
158 if(armnn::IsQuantizedType<T>())
160 inputTensorInfo.SetQuantizationScale(qScale);
161 inputTensorInfo.SetQuantizationOffset(qOffset);
162 outputTensorInfo.SetQuantizationScale(qScale);
163 outputTensorInfo.SetQuantizationOffset(qOffset);
// Single 13x8 channel worth of data; replicated/negated below to build the
// full { 2, 2, 13, 8 } input.
166 std::vector<float> singleChannelData({
167 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
168 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
169 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
170 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
171 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
172 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
173 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
174 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
175 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
176 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
177 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
178 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
179 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
182 // Constructs input data.
183 std::vector<float> inputData;
184 auto negator = [](float f) { return -f; };
186 // First image (two channels where the second channel is the negative of the first one).
187 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
188 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
190 // Second image (same as first image).
191 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
192 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
194 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
196 // These were calculated manually.
197 auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
198 boost::multi_array<T, 4> outputExpected(shape);
// NOTE(review): this chunk shows two consecutive assignments to
// outputExpected but the data for the first one (presumably the
// forceNoPadding branch) is not visible - confirm against the full file.
201 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
223 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
225 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
226 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
227 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
229 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
230 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
231 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
233 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
234 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
235 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
237 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
238 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
239 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
244 return SimplePooling2dTestImpl<ArmnnType>(
245 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 max pooling, stride 2, no padding, on a 1x2x4x4 input.
// Supports both NCHW and NHWC: data is authored in NCHW order and permuted
// when the NHWC layout is requested.
248 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
249 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
250 armnn::IWorkloadFactory& workloadFactory,
251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
252 const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
256 armnn::Pooling2dDescriptor descriptor;
257 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
258 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
259 descriptor.m_StrideX = descriptor.m_StrideY = 2;
260 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
261 descriptor.m_DataLayout = dataLayout;
263 armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
264 armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
266 // Set quantization parameters if the requested type is a quantized type.
267 if(armnn::IsQuantizedType<T>())
269 inputTensorInfo.SetQuantizationScale(qScale);
270 inputTensorInfo.SetQuantizationOffset(qOffset);
271 outputTensorInfo.SetQuantizationScale(qScale);
272 outputTensorInfo.SetQuantizationOffset(qOffset);
// Input laid out in NCHW order: each 2x2 tile's maximum lands in the output.
275 std::vector<T> inputData(
277 1.0f, 2.0f, 5.0f, 6.0f,
278 3.0f, 4.0f, 7.0f, 8.0f,
279 9.0f, 10.0f, 13.0f, 14.0f,
280 11.0f, 12.0f, 15.0f, 16.0f,
282 17.0f, 18.0f, 21.0f, 22.0f,
283 19.0f, 20.0f, 23.0f, 24.0f,
284 25.0f, 26.0f, 29.0f, 30.0f,
285 27.0f, 28.0f, 31.0f, 32.0f,
// NOTE(review): the expected-output values are not visible in this chunk.
289 std::vector<T> outputData(
// Reorder NCHW-authored data into NHWC when that layout is under test.
299 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
300 if (dataLayout == armnn::DataLayout::NHWC)
302 std::vector<T> tmp(inputData.size());
303 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
306 std::vector<T> tmp1(outputData.size());
307 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
311 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
313 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
315 return SimplePooling2dTestImpl<ArmnnType>(
316 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 average pooling, stride 2, no padding, on a 1x2x4x4 input.
// Mirrors SimpleMaxPooling2dTestCommon but with PoolingAlgorithm::Average;
// input tiles are chosen so each 2x2 average is an exact value.
319 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
320 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
321 armnn::IWorkloadFactory& workloadFactory,
322 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
323 armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
327 armnn::Pooling2dDescriptor descriptor;
328 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
329 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
330 descriptor.m_StrideX = descriptor.m_StrideY = 2;
331 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
332 descriptor.m_DataLayout = dataLayout;
334 armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
335 armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
337 // Set quantization parameters if the requested type is a quantized type.
338 if(armnn::IsQuantizedType<T>())
340 inputTensorInfo.SetQuantizationScale(qScale);
341 inputTensorInfo.SetQuantizationOffset(qOffset);
342 outputTensorInfo.SetQuantizationScale(qScale);
343 outputTensorInfo.SetQuantizationOffset(qOffset);
346 std::vector<T> inputData(
348 2.0f, 2.0f, 6.0f, 6.0f,
349 4.0f, 4.0f, 8.0f, 8.0f,
350 10.0f, 12.0f, 14.0f, 16.0f,
351 10.0f, 12.0f, 16.0f, 14.0f,
353 18.0f, 20.0f, 24.0f, 22.0f,
354 20.0f, 18.0f, 22.0f, 24.0f,
355 26.0f, 28.0f, 0.0f, 0.0f,
356 26.0f, 28.0f, 0.0f, 0.0f,
// NOTE(review): the expected-output values are not visible in this chunk.
360 std::vector<T> outputData(
// Reorder NCHW-authored data into NHWC when that layout is under test.
370 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
371 if (dataLayout == armnn::DataLayout::NHWC)
373 std::vector<T> tmp(inputData.size());
374 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
377 std::vector<T> tmp1(outputData.size());
378 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
382 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
384 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
386 return SimplePooling2dTestImpl<ArmnnType>(
387 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Stress-tests average pooling with a large pool (100x100), large symmetric
// padding (50) and a multi-batch input. Input is all-ones, so with
// PaddingMethod::Exclude every pooled average is exactly 1.
390 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
391 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
392 armnn::IWorkloadFactory& workloadFactory,
393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
397 armnn::Pooling2dDescriptor descriptor;
398 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
399 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
400 descriptor.m_StrideX = descriptor.m_StrideY = 5;
401 descriptor.m_PadLeft = 50;
402 descriptor.m_PadRight = 50;
403 descriptor.m_PadTop = 50;
404 descriptor.m_PadBottom = 50;
// Exclude: padded elements do not count towards the average, keeping the
// expected result at 1 even where the window overlaps the border.
405 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
407 armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
408 armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
410 // Set quantization parameters if the requested type is a quantized type.
411 if(armnn::IsQuantizedType<T>())
413 inputTensorInfo.SetQuantizationScale(qScale);
414 inputTensorInfo.SetQuantizationOffset(qOffset);
415 outputTensorInfo.SetQuantizationScale(qScale);
416 outputTensorInfo.SetQuantizationOffset(qOffset);
419 std::vector<T> inputVec;
421 for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
423 inputVec.push_back(1);
426 auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
428 std::vector<T> outputVec;
430 for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
432 outputVec.push_back(1);
435 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
437 return SimplePooling2dTestImpl<ArmnnType>(
438 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 L2 pooling (root-mean-square over each window), stride 2, no padding,
// on a 1x2x4x4 input; supports NCHW and NHWC via permutation.
// Note: no quantization scale/offset is applied here, unlike the max/average
// variants above.
441 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
442 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
443 armnn::IWorkloadFactory& workloadFactory,
444 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
445 armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
449 armnn::Pooling2dDescriptor descriptor;
450 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
451 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
452 descriptor.m_StrideX = descriptor.m_StrideY = 2;
453 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
454 descriptor.m_DataLayout = dataLayout;
456 armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
457 armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
459 std::vector<T> inputData(
461 1.0f, 7.0f, 5.0f, 5.0f,
462 1.0f, 7.0f, 5.0f, 5.0f,
463 3.0f, 3.0f, 1.0f, 1.0f,
464 3.0f, 3.0f, 1.0f, 1.0f,
466 1.0f, 7.0f, 0.0f, 0.0f,
467 1.0f, 7.0f, 2.0f, 0.0f,
468 0.0f, 2.0f, 1.0f, 1.0f,
469 0.0f, 0.0f, 1.0f, 1.0f,
// NOTE(review): the expected-output values are not visible in this chunk.
473 std::vector<T> outputData(
// Reorder NCHW-authored data into NHWC when that layout is under test.
483 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
484 if (dataLayout == armnn::DataLayout::NHWC)
486 std::vector<T> tmp(inputData.size());
487 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
490 std::vector<T> tmp1(outputData.size());
491 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
495 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
497 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
499 return SimplePooling2dTestImpl<ArmnnType>(
500 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 L2 pooling with stride 1 on a single-channel 4x4 input, producing a
// 2x2 output (overlapping windows).
503 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
504 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
505 armnn::IWorkloadFactory& workloadFactory,
506 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
510 armnn::Pooling2dDescriptor descriptor;
511 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
512 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
513 descriptor.m_StrideX = descriptor.m_StrideY = 1;
514 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
516 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
517 auto input = MakeTensor<T, 4>(inputTensorInfo,
519 2.0f, 1.0f, 5.0f, 2.0f,
520 1.0f, 2.0f, 2.0f, 1.0f,
521 5.0f, 4.0f, 1.0f, 5.0f,
522 2.0f, 1.0f, 5.0f, 2.0f,
526 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
// NOTE(review): the expected-output values are not visible in this chunk.
527 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
534 return SimplePooling2dTestImpl<ArmnnType>(
535 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 L2 pooling with stride 3 (non-overlapping windows) on a 9x9 input made
// of a repeated 3x3 tile, producing a 3x3 output of identical values.
538 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
539 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
540 armnn::IWorkloadFactory& workloadFactory,
541 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
545 armnn::Pooling2dDescriptor descriptor;
546 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
547 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
548 descriptor.m_StrideX = descriptor.m_StrideY = 3;
549 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
551 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
552 auto input = MakeTensor<T, 4>(inputTensorInfo,
554 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
555 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
556 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
557 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
558 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
559 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
560 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
561 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
562 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
566 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
// NOTE(review): the expected-output values are not visible in this chunk.
567 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
575 return SimplePooling2dTestImpl<ArmnnType>(
576 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 L2 pooling with stride 4 (stride larger than pool, so windows skip a
// row/column of zeros) on a 7x7 input, producing a 2x2 output.
579 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
580 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
581 armnn::IWorkloadFactory& workloadFactory,
582 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
586 armnn::Pooling2dDescriptor descriptor;
587 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
588 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
589 descriptor.m_StrideX = descriptor.m_StrideY = 4;
590 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
592 armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
593 auto input = MakeTensor<T, 4>(inputTensorInfo,
595 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
596 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
597 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
598 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
599 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
600 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
601 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
605 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
// NOTE(review): the expected-output values are not visible in this chunk.
606 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
613 return SimplePooling2dTestImpl<ArmnnType>(
614 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// 7x7 L2 pooling covering the whole 7x7 input in a single window, producing
// a 1x1 output.
617 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
618 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
619 armnn::IWorkloadFactory& workloadFactory,
620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
624 armnn::Pooling2dDescriptor descriptor;
625 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
626 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
627 descriptor.m_StrideX = descriptor.m_StrideY = 7;
628 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
630 armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
631 auto input = MakeTensor<T, 4>(inputTensorInfo,
633 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
634 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
635 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
636 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
637 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
638 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
639 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
643 armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
// NOTE(review): the expected-output value is not visible in this chunk.
644 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
650 return SimplePooling2dTestImpl<ArmnnType>(
651 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// 9x9 L2 pooling covering the whole 9x9 input (same repeated-tile data as
// the Size3Stride3 test) in a single window, producing a 1x1 output.
654 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
655 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
656 armnn::IWorkloadFactory& workloadFactory,
657 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
661 armnn::Pooling2dDescriptor descriptor;
662 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
663 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
664 descriptor.m_StrideX = descriptor.m_StrideY = 9;
665 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
667 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
668 auto input = MakeTensor<T, 4>(inputTensorInfo,
670 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
671 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
672 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
673 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
674 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
675 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
676 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
677 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
678 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
682 armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
// NOTE(review): the expected-output value is not visible in this chunk.
683 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
689 return SimplePooling2dTestImpl<ArmnnType>(
690 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Max pooling with a non-square pool (2x3), asymmetric strides (2x1) and
// asymmetric padding, on a degenerate 1x3 input - exercises the padding /
// rounding edge cases rather than the arithmetic.
693 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
694 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
695 armnn::IWorkloadFactory& workloadFactory,
696 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
700 armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
701 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
703 armnn::Pooling2dDescriptor descriptor;
704 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
705 descriptor.m_PoolWidth = 2;
706 descriptor.m_PoolHeight = 3;
707 descriptor.m_StrideX = 2;
708 descriptor.m_StrideY = 1;
709 descriptor.m_PadLeft = 2;
710 descriptor.m_PadRight = 0;
711 descriptor.m_PadTop = 1;
712 descriptor.m_PadBottom = 2;
713 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
714 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
716 // Construct input data.
// NOTE(review): the three input values are not visible in this chunk.
717 auto input = MakeTensor<T, 4>(inputTensorInfo,
723 // These were calculated manually.
724 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
726 0.0f, 3.0f, 0.0f, 3.0f,
730 return SimplePooling2dTestImpl<ArmnnType>(
731 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Comparison test: runs the same pooling workload (any algorithm, chosen via
// 'poolingType') on both the backend under test and a reference workload
// factory with random input, returning both outputs so the caller can check
// they agree. Does not use SimplePooling2dTestImpl.
734 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
735 LayerTestResult<T, 4> ComparePooling2dTestCommon(
736 armnn::IWorkloadFactory& workloadFactory,
737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
738 armnn::IWorkloadFactory& refWorkloadFactory,
739 armnn::PoolingAlgorithm poolingType,
743 boost::ignore_unused(memoryManager);
744 const unsigned int inputWidth = 16;
745 const unsigned int inputHeight = 32;
746 const unsigned int channelCount = 2;
747 const unsigned int batchSize = 5;
749 const unsigned int poolSize = 3;
750 const unsigned int strideX = 2;
751 const unsigned int strideY = 4;
752 const unsigned int padX = 0;
753 const unsigned int padY = 0;
// Floor-rounded output dimensions for the parameters above.
755 const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
756 const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
758 armnn::TensorInfo inputTensorInfo;
759 armnn::TensorInfo outputTensorInfo;
761 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
762 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
764 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
765 outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
767 // Set quantization parameters if the requested type is a quantized type.
768 if(armnn::IsQuantizedType<T>())
770 inputTensorInfo.SetQuantizationScale(qScale);
771 inputTensorInfo.SetQuantizationOffset(qOffset);
772 outputTensorInfo.SetQuantizationScale(qScale);
773 outputTensorInfo.SetQuantizationOffset(qOffset);
// Fixed seed keeps the random input - and therefore the test - deterministic.
776 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
778 LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
780 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
781 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
783 armnn::Pooling2dQueueDescriptor data;
784 armnn::WorkloadInfo info;
785 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
786 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
787 data.m_Parameters.m_PoolType = poolingType;
788 data.m_Parameters.m_PoolWidth = poolSize;
789 data.m_Parameters.m_PoolHeight = poolSize;
790 data.m_Parameters.m_StrideX = strideX;
791 data.m_Parameters.m_StrideY = strideY;
792 data.m_Parameters.m_PadLeft = padX;
793 data.m_Parameters.m_PadRight = padX;
794 data.m_Parameters.m_PadTop = padY;
795 data.m_Parameters.m_PadBottom = padY;
796 data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
798 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
799 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
801 // Don't execute if Pooling is not supported, as an exception will be raised.
802 armnn::BackendId backend = workloadFactory.GetBackendId();
803 const size_t reasonIfUnsupportedMaxLen = 255;
804 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
805 comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
807 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
808 if (!comparisonResult.supported)
810 return comparisonResult;
// The reference workload shares descriptor/info but uses its own handles.
813 armnn::Pooling2dQueueDescriptor refData = data;
814 armnn::WorkloadInfo refInfo = info;
815 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
816 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
818 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
819 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
821 outputHandleRef->Allocate();
822 inputHandleRef->Allocate();
823 inputHandle->Allocate();
824 outputHandle->Allocate();
826 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
827 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
// NOTE(review): the workload->Execute() call for the backend under test is
// not visible in this chunk (only the reference execute is) - confirm it
// exists in the full file.
830 workloadRef->Execute();
// Reference output is stored as 'outputExpected' so standard comparison
// machinery can diff the two results.
832 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
833 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
835 return comparisonResult;
839 // Tests max pooling with the following parameters:
// 2x2 pool, stride 2x2, optional 3-wide left/right padding, on a single
// 4x4 channel. With padding, border output columns come out as 0 because the
// window only covers zero-padding there.
847 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
848 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
849 armnn::IWorkloadFactory& workloadFactory,
850 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
// NOTE(review): forceNoPadding/qScale/qOffset parameters are used below but
// not visible in this chunk - confirm the full signature.
855 armnn::Pooling2dDescriptor descriptor;
856 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
857 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
858 descriptor.m_StrideX = 2;
859 descriptor.m_StrideY = 2;
860 descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
861 descriptor.m_PadTop = descriptor.m_PadBottom = 0;
862 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
863 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
866 unsigned int inputWidth = 4;
868 unsigned int inputHeight = 4;
870 unsigned int outputWidth =
871 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
872 descriptor.m_StrideX;
873 unsigned int outputHeight =
874 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
875 descriptor.m_StrideY;
876 unsigned int channels = 1;
877 unsigned int batchSize = 1;
879 std::vector<float> inputData = {
880 510.0f, 222.0f, 780.0f, 654.0f,
881 141.0f, 276.0f, 15.0f, 546.0f,
882 303.0f, 618.0f, 582.0f, 339.0f,
883 438.0f, 564.0f, 573.0f, 402.0f
886 // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
887 std::vector<float> expectedOutputDataWithPadding = {
888 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
889 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
// NOTE(review): the no-padding expected values are not visible in this chunk.
892 std::vector<float> expectedOutputDataNoPadding = {
897 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
899 // Scale and offset should match input - we're just calculating maximum values.
900 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
902 // Set quantization parameters if the requested type is a quantized type.
903 if(armnn::IsQuantizedType<T>())
905 inputTensorInfo.SetQuantizationScale(qScale);
906 inputTensorInfo.SetQuantizationOffset(qOffset);
907 outputTensorInfo.SetQuantizationScale(qScale);
908 outputTensorInfo.SetQuantizationOffset(qOffset);
911 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
// Expected data depends on whether padding was suppressed above.
913 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
914 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
915 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
917 return SimplePooling2dTestImpl<ArmnnType>(
918 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
922 // Tests max pooling with the following parameters:
// NOTE(review): header comment says "max pooling" but the descriptor below
// uses PoolingAlgorithm::Average with PaddingMethod::IgnoreValue - the
// comment in the full file presumably corrects this; confirm.
// 3x2 average pool, stride 2x2, optional 1-wide left/right padding counted
// as zeros (IgnoreValue), on a single 2x3 channel.
930 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
931 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
932 armnn::IWorkloadFactory& workloadFactory,
933 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
938 armnn::Pooling2dDescriptor descriptor;
939 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
940 descriptor.m_PoolWidth = 3;
941 descriptor.m_PoolHeight = 2;
942 descriptor.m_StrideX = 2;
943 descriptor.m_StrideY = 2;
944 descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
945 descriptor.m_PadRight = descriptor.m_PadLeft;
946 descriptor.m_PadTop = 0;
947 descriptor.m_PadBottom = 0;
948 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
// IgnoreValue: padded zeros are included in the divisor of the average,
// unlike the Exclude tests above.
949 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
951 unsigned int inputWidth = 3;
952 unsigned int inputHeight = 2;
953 unsigned int outputWidth =
954 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
955 descriptor.m_StrideX;
956 unsigned int outputHeight =
957 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
958 descriptor.m_StrideY;
959 unsigned int channels = 1;
960 unsigned int batchSize = 1;
// NOTE(review): the input and expected-output values are not visible in
// this chunk.
962 std::vector<float> inputData = {
967 std::vector<float> expectedOutputDataWithPadding = {
971 std::vector<float> expectedOutputDataNoPadding = {
975 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
977 // Scale and offset should match input - we're just calculating average values.
978 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
980 // Set quantization parameters if the requested type is a quantized type.
981 if(armnn::IsQuantizedType<T>())
983 inputTensorInfo.SetQuantizationScale(qScale);
984 inputTensorInfo.SetQuantizationOffset(qOffset);
985 outputTensorInfo.SetQuantizationScale(qScale);
986 outputTensorInfo.SetQuantizationOffset(qOffset);
989 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
991 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
992 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
993 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
995 return SimplePooling2dTestImpl<ArmnnType>(
996 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Max pooling 2x2 / stride 2 over a 4x4 single-channel input with 1 pixel of
// padding on every edge (PaddingMethod::IgnoreValue), giving a 3x3 output.
// qScale/qOffset are applied to both tensors when T is a quantized type.
1000 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1001 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1002 armnn::IWorkloadFactory& workloadFactory,
1003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1004 float qScale = 1.0f,
1005 int32_t qOffset = 0)
1007 armnn::Pooling2dDescriptor descriptor;
1008 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1009 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1010 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1011 descriptor.m_PadLeft = 1;
1012 descriptor.m_PadRight = 1;
1013 descriptor.m_PadTop = 1;
1014 descriptor.m_PadBottom = 1;
1015 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
// NCHW shapes: 4x4 input pools to 3x3 with the padding above.
1017 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1018 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1020 // Set quantization parameters if the requested type is a quantized type.
1021 if(armnn::IsQuantizedType<T>())
1023 inputTensorInfo.SetQuantizationScale(qScale);
1024 inputTensorInfo.SetQuantizationOffset(qOffset);
1025 outputTensorInfo.SetQuantizationScale(qScale);
1026 outputTensorInfo.SetQuantizationOffset(qOffset);
1029 auto input = MakeTensor<T, 4>(inputTensorInfo,
1030 QuantizedVector<T>({
1031 -1.0f, -2.0f, 3.0f, 4.0f,
1032 -1.0f, -2.0f, 3.0f, 4.0f,
1033 1.0f, 2.0f, -3.0f, -4.0f,
1034 1.0f, 2.0f, -3.0f, -4.0f,
// NOTE(review): expected-output values elided in this listing.
1038 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1039 QuantizedVector<T>({
1046 return SimplePooling2dTestImpl<ArmnnType>(
1047 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Max pooling 3x3 / stride 1 over a 4x4 single-channel input with 1-pixel
// padding all round (IgnoreValue), so the output keeps the 4x4 shape.
1050 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1051 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1052 armnn::IWorkloadFactory& workloadFactory,
1053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1054 float qScale = 1.0f,
1055 int32_t qOffset = 0)
1057 armnn::Pooling2dDescriptor descriptor;
1058 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1059 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1060 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1061 descriptor.m_PadLeft = 1;
1062 descriptor.m_PadRight = 1;
1063 descriptor.m_PadTop = 1;
1064 descriptor.m_PadBottom = 1;
1065 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
// Same-size output: 3x3 pool, stride 1, pad 1 keeps 4x4.
1067 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1068 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1070 // Set quantization parameters if the requested type is a quantized type.
1071 if(armnn::IsQuantizedType<T>())
1073 inputTensorInfo.SetQuantizationScale(qScale);
1074 inputTensorInfo.SetQuantizationOffset(qOffset);
1075 outputTensorInfo.SetQuantizationScale(qScale);
1076 outputTensorInfo.SetQuantizationOffset(qOffset);
1079 auto input = MakeTensor<T, 4>(inputTensorInfo,
1080 QuantizedVector<T>({
1081 -1.0f, -2.0f, 3.0f, 4.0f,
1082 -1.0f, -2.0f, 3.0f, 4.0f,
1083 1.0f, 2.0f, -3.0f, -4.0f,
1084 1.0f, 2.0f, -3.0f, -4.0f,
// Each output element is the max over the 3x3 window centred on it
// (padded positions contribute nothing to a max of real values here).
1088 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1089 QuantizedVector<T>({
1090 -1.0f, 3.0f, 4.0f, 4.0f,
1091 2.0f, 3.0f, 4.0f, 4.0f,
1092 2.0f, 3.0f, 4.0f, 4.0f,
1093 2.0f, 2.0f, 2.0f, -3.0f,
1097 return SimplePooling2dTestImpl<ArmnnType>(
1098 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Average pooling 2x2 / stride 2 over a 4x4 single-channel input with 1-pixel
// padding on every edge (IgnoreValue), giving a 3x3 output.
1101 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1102 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1103 armnn::IWorkloadFactory& workloadFactory,
1104 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1105 float qScale = 1.0f,
1106 int32_t qOffset = 0)
1108 armnn::Pooling2dDescriptor descriptor;
1109 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1110 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1111 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1112 descriptor.m_PadLeft = 1;
1113 descriptor.m_PadRight = 1;
1114 descriptor.m_PadTop = 1;
1115 descriptor.m_PadBottom = 1;
1116 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1118 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1119 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1121 // Set quantization parameters if the requested type is a quantized type.
1122 if(armnn::IsQuantizedType<T>())
1124 inputTensorInfo.SetQuantizationScale(qScale);
1125 inputTensorInfo.SetQuantizationOffset(qOffset);
1126 outputTensorInfo.SetQuantizationScale(qScale);
1127 outputTensorInfo.SetQuantizationOffset(qOffset);
// Columns are constant so averages are easy to verify by hand.
1130 auto input = MakeTensor<T, 4>(inputTensorInfo,
1131 QuantizedVector<T>({
1132 12.0f, 20.0f, 32.0f, 40.0f,
1133 12.0f, 20.0f, 32.0f, 40.0f,
1134 12.0f, 20.0f, 32.0f, 40.0f,
1135 12.0f, 20.0f, 32.0f, 40.0f,
// NOTE(review): expected-output values elided in this listing.
1139 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1140 QuantizedVector<T>({
1147 return SimplePooling2dTestImpl<ArmnnType>(
1148 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Average pooling 3x3 / stride 2 over a 4x4 input with NO padding but
// ceiling output-shape rounding, so the 4x4 input still yields a 2x2 output
// (the last window is partially outside the input).
1151 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1152 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1153 armnn::IWorkloadFactory& workloadFactory,
1154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1155 float qScale = 1.0f,
1156 int32_t qOffset = 0)
1158 armnn::Pooling2dDescriptor descriptor;
1159 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1160 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1161 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1162 descriptor.m_PadLeft = 0;
1163 descriptor.m_PadRight = 0;
1164 descriptor.m_PadTop = 0;
1165 descriptor.m_PadBottom = 0;
1166 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
// Ceiling rounding is what makes the 2nd output row/column exist at all.
1167 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
1169 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1170 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
1172 // Set quantization parameters if the requested type is a quantized type.
1173 if(armnn::IsQuantizedType<T>())
1175 inputTensorInfo.SetQuantizationScale(qScale);
1176 inputTensorInfo.SetQuantizationOffset(qOffset);
1177 outputTensorInfo.SetQuantizationScale(qScale);
1178 outputTensorInfo.SetQuantizationOffset(qOffset);
1181 auto input = MakeTensor<T, 4>(inputTensorInfo,
1182 QuantizedVector<T>({
1183 1.0f, 2.0f, 3.0f, 4.0f,
1184 1.0f, 2.0f, 3.0f, 4.0f,
1185 1.0f, 2.0f, 3.0f, 4.0f,
1186 1.0f, 2.0f, 3.0f, 4.0f,
// NOTE(review): expected-output values elided in this listing.
1190 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1191 QuantizedVector<T>({
1197 return SimplePooling2dTestImpl<ArmnnType>(
1198 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Average pooling 3x3 / stride 1 over a 4x4 input with 1-pixel padding all
// round (IgnoreValue); output keeps the 4x4 shape.
1201 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1202 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1203 armnn::IWorkloadFactory& workloadFactory,
1204 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1205 float qScale = 1.0f,
1206 int32_t qOffset = 0)
1208 armnn::Pooling2dDescriptor descriptor;
1209 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1210 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1211 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1212 descriptor.m_PadLeft = 1;
1213 descriptor.m_PadRight = 1;
1214 descriptor.m_PadTop = 1;
1215 descriptor.m_PadBottom = 1;
1216 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1218 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1219 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1221 // Set quantization parameters if the requested type is a quantized type.
1222 if(armnn::IsQuantizedType<T>())
1224 inputTensorInfo.SetQuantizationScale(qScale);
1225 inputTensorInfo.SetQuantizationOffset(qOffset);
1226 outputTensorInfo.SetQuantizationScale(qScale);
1227 outputTensorInfo.SetQuantizationOffset(qOffset);
// All inputs are multiples of 9 so the 3x3 averages land on whole numbers.
1230 auto input = MakeTensor<T, 4>(inputTensorInfo,
1231 QuantizedVector<T>({
1232 9.0f, 27.0f, 18.0f, 36.0f,
1233 18.0f, 9.0f, 18.0f, 9.0f,
1234 27.0f, 18.0f, 9.0f, 27.0f,
1235 9.0f, 27.0f, 9.0f, 18.0f,
1239 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1240 QuantizedVector<T>({
1241 7.0f, 11.0f, 13.0f, 9.0f,
1242 12.0f, 17.0f, 19.0f, 13.0f,
1243 12.0f, 16.0f, 16.0f, 10.0f,
1244 9.0f, 11.0f, 12.0f, 7.0f,
1248 return SimplePooling2dTestImpl<ArmnnType>(
1249 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// L2 pooling 2x2 / stride 2 over a 4x4 input with 1-pixel padding all round
// (IgnoreValue), giving a 3x3 output of root-mean-square window values.
1252 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1253 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1254 armnn::IWorkloadFactory& workloadFactory,
1255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1256 float qScale = 1.0f,
1257 int32_t qOffset = 0)
1259 armnn::Pooling2dDescriptor descriptor;
1260 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1261 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1262 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1263 descriptor.m_PadLeft = 1;
1264 descriptor.m_PadRight = 1;
1265 descriptor.m_PadTop = 1;
1266 descriptor.m_PadBottom = 1;
1267 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1269 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1270 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1272 // Set quantization parameters if the requested type is a quantized type.
1273 if(armnn::IsQuantizedType<T>())
1275 inputTensorInfo.SetQuantizationScale(qScale);
1276 inputTensorInfo.SetQuantizationOffset(qOffset);
1277 outputTensorInfo.SetQuantizationScale(qScale);
1278 outputTensorInfo.SetQuantizationOffset(qOffset);
1281 auto input = MakeTensor<T, 4>(inputTensorInfo,
1282 QuantizedVector<T>({
1283 2.0f, 4.0f, 8.0f, 16.0f,
1284 4.0f, 2.0f, 2.0f, 4.0f,
1285 8.0f, 2.0f, 4.0f, 2.0f,
1286 16.0f, 2.0f, 2.0f, 8.0f,
// Corner value 1.0 = sqrt(2^2 / 4): the three padded zeros are counted
// in the divisor (IgnoreValue semantics) — worth confirming if edited.
1290 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1291 QuantizedVector<T>({
1292 1.0f, 4.4721f, 8.0f,
1293 4.4721f, 2.6457f, 2.236f,
1294 8.0f, 1.4142f, 4.0f,
1298 return SimplePooling2dTestImpl<ArmnnType>(
1299 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// L2 pooling 3x3 / stride 1 over a 4x4 input with 1-pixel padding all round
// (IgnoreValue); output keeps the 4x4 shape. Input columns are constant, so
// the expected output is symmetric top-to-bottom.
1302 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1303 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1304 armnn::IWorkloadFactory& workloadFactory,
1305 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1306 float qScale = 1.0f,
1307 int32_t qOffset = 0)
1309 armnn::Pooling2dDescriptor descriptor;
1310 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1311 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1312 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1313 descriptor.m_PadLeft = 1;
1314 descriptor.m_PadRight = 1;
1315 descriptor.m_PadTop = 1;
1316 descriptor.m_PadBottom = 1;
1317 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1319 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1320 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1322 // Set quantization parameters if the requested type is a quantized type.
1323 if(armnn::IsQuantizedType<T>())
1325 inputTensorInfo.SetQuantizationScale(qScale);
1326 inputTensorInfo.SetQuantizationOffset(qOffset);
1327 outputTensorInfo.SetQuantizationScale(qScale);
1328 outputTensorInfo.SetQuantizationOffset(qOffset);
1331 auto input = MakeTensor<T, 4>(inputTensorInfo,
1332 QuantizedVector<T>({
1333 1.0f, 2.0f, 3.0f, 4.0f,
1334 1.0f, 2.0f, 3.0f, 4.0f,
1335 1.0f, 2.0f, 3.0f, 4.0f,
1336 1.0f, 2.0f, 3.0f, 4.0f,
// Expected values are RMS over each 3x3 window (padded zeros included).
1340 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1341 QuantizedVector<T>({
1342 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1343 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1344 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1345 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1349 return SimplePooling2dTestImpl<ArmnnType>(
1350 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1353 } // anonymous namespace
// Public entry points for the strided simple-max-pooling tests. Each wrapper
// instantiates the corresponding *TestCommon template for one data type;
// quantized variants pass explicit (scale, offset) pairs.
1355 LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
1356 armnn::IWorkloadFactory& workloadFactory,
1357 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1358 bool forceNoPadding)
1360 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1361 workloadFactory, memoryManager, forceNoPadding);
// QAsymm8 variant: quantization scale 3.0, offset -5.
1364 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
1365 armnn::IWorkloadFactory& workloadFactory,
1366 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1367 bool forceNoPadding)
1369 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
1370 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
1373 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
1374 armnn::IWorkloadFactory& workloadFactory,
1375 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1376 bool forceNoPadding)
1378 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
1379 workloadFactory, memoryManager, forceNoPadding);
1382 LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
1383 armnn::IWorkloadFactory& workloadFactory,
1384 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1385 bool forceNoPadding)
1387 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1388 workloadFactory, memoryManager, forceNoPadding);
// QAsymm8 variant: quantization scale 0.1, offset 128.
1391 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
1392 armnn::IWorkloadFactory& workloadFactory,
1393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1394 bool forceNoPadding)
1396 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
1397 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
1400 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
1401 armnn::IWorkloadFactory& workloadFactory,
1402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1403 bool forceNoPadding)
1405 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
1406 workloadFactory, memoryManager, forceNoPadding);
// Data-layout-aware simple max-pooling entry points (Float32 / QAsymm8 /
// QSymm16), followed by the padded (IgnoreValue) max-pooling variants.
1409 LayerTestResult<float, 4> SimpleMaxPooling2dTest(
1410 armnn::IWorkloadFactory& workloadFactory,
1411 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1412 const armnn::DataLayout dataLayout)
1414 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1417 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
1418 armnn::IWorkloadFactory& workloadFactory,
1419 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1420 const armnn::DataLayout dataLayout)
1422 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
1425 LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
1426 armnn::IWorkloadFactory& workloadFactory,
1427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1428 const armnn::DataLayout dataLayout)
1430 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
1432 LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
1433 armnn::IWorkloadFactory& workloadFactory,
1434 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1436 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
// QAsymm8 variant: quantization scale 1.0, offset -5.
1439 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
1440 armnn::IWorkloadFactory& workloadFactory,
1441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1443 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
1444 workloadFactory, memoryManager, 1.0f, -5);
1447 LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
1448 armnn::IWorkloadFactory& workloadFactory,
1449 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1451 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
1452 workloadFactory, memoryManager);
1455 LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
1456 armnn::IWorkloadFactory& workloadFactory,
1457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1459 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1462 LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
1463 armnn::IWorkloadFactory& workloadFactory,
1464 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1466 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
1467 workloadFactory, memoryManager, 1.0f, -5);
1470 LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
1471 armnn::IWorkloadFactory& workloadFactory,
1472 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1474 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
1475 workloadFactory, memoryManager);
// Average-pooling public entry points: simple (layout-aware), padded
// 3x2-stride-2x2, large-tensor, padded-simple, no-padding and size-3
// variants, each for Float32 / QAsymm8 / QSymm16 as applicable.
1478 LayerTestResult<float, 4> SimpleAveragePooling2dTest(
1479 armnn::IWorkloadFactory& workloadFactory,
1480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1481 const armnn::DataLayout dataLayout)
1483 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
// QAsymm8 variant: quantization scale 0.5, offset -1.
1486 LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
1487 armnn::IWorkloadFactory& workloadFactory,
1488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1489 const armnn::DataLayout dataLayout)
1491 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
1492 workloadFactory, memoryManager, dataLayout, 0.5, -1);
1495 LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
1496 armnn::IWorkloadFactory& workloadFactory,
1497 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1498 const armnn::DataLayout dataLayout)
1500 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
1501 workloadFactory, memoryManager, dataLayout);
1504 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
1505 armnn::IWorkloadFactory& workloadFactory,
1506 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1507 bool forceNoPadding)
1509 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1510 workloadFactory, memoryManager, forceNoPadding);
1513 LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
1514 armnn::IWorkloadFactory& workloadFactory,
1515 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1517 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1520 LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
1521 armnn::IWorkloadFactory& workloadFactory,
1522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1524 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
1525 workloadFactory, memoryManager, 0.5, -1);
1528 LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
1529 armnn::IWorkloadFactory& workloadFactory,
1530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1532 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
1533 workloadFactory, memoryManager);
1535 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
1536 armnn::IWorkloadFactory& workloadFactory,
1537 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1539 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1542 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
1543 armnn::IWorkloadFactory& workloadFactory,
1544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1546 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
1547 workloadFactory, memoryManager);
1550 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
1551 armnn::IWorkloadFactory& workloadFactory,
1552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1554 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
1555 workloadFactory, memoryManager);
1558 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
1559 armnn::IWorkloadFactory& workloadFactory,
1560 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1562 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1563 workloadFactory, memoryManager);
1566 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
1567 armnn::IWorkloadFactory& workloadFactory,
1568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1570 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
1571 workloadFactory, memoryManager);
1574 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
1575 armnn::IWorkloadFactory& workloadFactory,
1576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1578 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
1579 workloadFactory, memoryManager);
1582 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
1583 armnn::IWorkloadFactory& workloadFactory,
1584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1586 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1589 LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
1590 armnn::IWorkloadFactory& workloadFactory,
1591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1593 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
1594 workloadFactory, memoryManager);
1597 LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
1598 armnn::IWorkloadFactory& workloadFactory,
1599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1601 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
1602 workloadFactory, memoryManager);
// L2-pooling public entry points: simple (layout-aware) plus the
// size-3 stride-1/3/4 and size-7/size-9 variants, each instantiated
// for Float32 / QAsymm8 / QSymm16 with default quantization.
1605 LayerTestResult<float, 4> SimpleL2Pooling2dTest(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1608 const armnn::DataLayout dataLayout)
1610 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1613 LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
1614 armnn::IWorkloadFactory& workloadFactory,
1615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1616 const armnn::DataLayout dataLayout)
1618 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
1621 LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
1622 armnn::IWorkloadFactory& workloadFactory,
1623 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1624 const armnn::DataLayout dataLayout)
1626 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
1629 LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
1630 armnn::IWorkloadFactory& workloadFactory,
1631 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1633 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1636 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
1637 armnn::IWorkloadFactory& workloadFactory,
1638 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1640 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1643 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
1644 armnn::IWorkloadFactory& workloadFactory,
1645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1647 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
1650 LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
1651 armnn::IWorkloadFactory& workloadFactory,
1652 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1654 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1657 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
1658 armnn::IWorkloadFactory& workloadFactory,
1659 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1661 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1664 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
1665 armnn::IWorkloadFactory& workloadFactory,
1666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1668 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
1670 LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
1671 armnn::IWorkloadFactory& workloadFactory,
1672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1674 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1677 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
1678 armnn::IWorkloadFactory& workloadFactory,
1679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1681 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1684 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
1685 armnn::IWorkloadFactory& workloadFactory,
1686 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1688 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
1691 LayerTestResult<float, 4> L2Pooling2dSize7Test(
1692 armnn::IWorkloadFactory& workloadFactory,
1693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1695 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1698 LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
1699 armnn::IWorkloadFactory& workloadFactory,
1700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1702 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1705 LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
1706 armnn::IWorkloadFactory& workloadFactory,
1707 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1709 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
1712 LayerTestResult<float, 4> L2Pooling2dSize9Test(
1713 armnn::IWorkloadFactory& workloadFactory,
1714 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1716 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1719 LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
1720 armnn::IWorkloadFactory& workloadFactory,
1721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1723 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1726 LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
1727 armnn::IWorkloadFactory& workloadFactory,
1728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1730 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
// Padded L2-pooling and asymmetric non-square pooling entry points for
// Float32 / QAsymm8 / QSymm16, all with default quantization parameters.
1732 LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
1733 armnn::IWorkloadFactory& workloadFactory,
1734 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1736 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1739 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
1740 armnn::IWorkloadFactory& workloadFactory,
1741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1743 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1746 LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
1747 armnn::IWorkloadFactory& workloadFactory,
1748 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1750 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
1753 LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
1754 armnn::IWorkloadFactory& workloadFactory,
1755 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1757 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1760 LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
1761 armnn::IWorkloadFactory& workloadFactory,
1762 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1764 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1767 LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
1768 armnn::IWorkloadFactory& workloadFactory,
1769 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1771 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
1774 LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
1775 armnn::IWorkloadFactory& workloadFactory,
1776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1778 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1781 LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
1782 armnn::IWorkloadFactory& workloadFactory,
1783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1785 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1788 LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
1789 armnn::IWorkloadFactory& workloadFactory,
1790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1792 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
// Cross-backend comparison tests: run the given pooling algorithm on both
// the backend under test and a reference workload factory and compare.
1795 LayerTestResult<float, 4> ComparePooling2dTest(
1796 armnn::IWorkloadFactory& workloadFactory,
1797 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1798 armnn::IWorkloadFactory& refWorkloadFactory,
1799 armnn::PoolingAlgorithm poolingType)
1801 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1802 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
// QAsymm8 variant: quantization scale 0.1, offset 128.
1805 LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
1806 armnn::IWorkloadFactory& workloadFactory,
1807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1808 armnn::IWorkloadFactory& refWorkloadFactory,
1809 armnn::PoolingAlgorithm poolingType)
1811 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
1812 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
1815 LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
1816 armnn::IWorkloadFactory& workloadFactory,
1817 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1818 armnn::IWorkloadFactory& refWorkloadFactory,
1819 armnn::PoolingAlgorithm poolingType)
1821 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
1822 workloadFactory, memoryManager, refWorkloadFactory, poolingType);