2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
6 #include "Pooling2dTestImpl.hpp"
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
11 #include <armnn/LayerSupport.hpp>
13 #include <armnnUtils/TensorUtils.hpp>
14 #include <armnnUtils/DataLayoutIndexed.hpp>
15 #include <armnnUtils/Permute.hpp>
17 #include <armnn/utility/IgnoreUnused.hpp>
18 #include <armnn/utility/NumericCast.hpp>
20 #include <backendsCommon/WorkloadInfo.hpp>
22 #include <backendsCommon/test/TensorCopyUtils.hpp>
23 #include <backendsCommon/test/WorkloadTestUtils.hpp>
25 #include <test/TensorHelpers.hpp>
30 using namespace armnnUtils;
// Shared harness for all Pooling2d layer tests: builds a Pooling2d workload from
// 'descriptor', runs it on 'workloadFactory' over 'input', and returns a
// LayerTestResult holding the actual output next to 'outputExpected'.
// NOTE(review): qScale/qOffset are referenced below but their declarations are not
// visible in this view — presumably elided parameters; confirm against the full file.
32 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
33 LayerTestResult<T, 4> SimplePooling2dTestImpl(
34 armnn::IWorkloadFactory& workloadFactory,
35 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
36 const armnn::ITensorHandleFactory& tensorHandleFactory,
37 armnn::Pooling2dDescriptor descriptor,
40 const boost::multi_array<T, 4>& input,
41 const boost::multi_array<T, 4>& outputExpected)
43 IgnoreUnused(memoryManager);
// The descriptor's data layout drives which multi_array axis holds H/W/C.
44 const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
45 const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
46 auto heightIndex = dimensionIndices.GetHeightIndex();
47 auto widthIndex = dimensionIndices.GetWidthIndex();
48 auto channelsIndex = dimensionIndices.GetChannelsIndex();
// Derive tensor dimensions from the supplied multi_array shapes (batch is axis 0).
50 unsigned int inputHeight = armnn::numeric_cast<unsigned int>(input.shape()[heightIndex]);
51 unsigned int inputWidth = armnn::numeric_cast<unsigned int>(input.shape()[widthIndex]);
52 unsigned int inputChannels = armnn::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
53 unsigned int inputBatchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
55 unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
56 unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
57 unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
58 unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
60 armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
61 inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
63 armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
64 outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
66 // Set quantization parameters if the requested type is a quantized type.
67 if(armnn::IsQuantizedType<T>())
69 inputTensorInfo.SetQuantizationScale(qScale);
70 inputTensorInfo.SetQuantizationOffset(qOffset);
71 outputTensorInfo.SetQuantizationScale(qScale);
72 outputTensorInfo.SetQuantizationOffset(qOffset);
75 LayerTestResult<T, 4> result(outputTensorInfo);
77 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
78 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
80 armnn::Pooling2dQueueDescriptor queueDescriptor;
81 queueDescriptor.m_Parameters = descriptor;
82 queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
84 armnn::WorkloadInfo workloadInfo;
85 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
86 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
88 // Don't execute if Pooling is not supported, as an exception will be raised.
89 armnn::BackendId backend = workloadFactory.GetBackendId();
90 const size_t reasonIfUnsupportedMaxLen = 255;
91 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
// Query backend support first; the result records 'supported' so callers can
// distinguish "unsupported" from "wrong output".
92 result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
93 queueDescriptor.m_Parameters,
94 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
95 if (!result.supported)
100 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
102 inputHandle->Allocate();
103 outputHandle->Allocate();
105 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
// Actual output is copied back into result.output after execution
// (the Execute() call itself is elided from this view).
109 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
111 result.outputExpected = outputExpected;
117 // Tests max pooling with the following parameters:
// Max pooling test: 3x3 pool, stride 2(x)/4(y), optional 3-wide left/right padding,
// Floor rounding, Exclude padding. Two batches of two channels where channel 1 is
// the negation of channel 0, so max pooling exercises both signs.
// NOTE(review): forceNoPadding/qScale/qOffset are used below but not declared in
// the visible signature — presumably elided parameters; confirm in the full file.
125 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
126 LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
127 armnn::IWorkloadFactory& workloadFactory,
128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
129 const armnn::ITensorHandleFactory& tensorHandleFactory,
134 armnn::Pooling2dDescriptor descriptor;
135 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
136 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
137 descriptor.m_StrideX = 2;
138 descriptor.m_StrideY = 4;
139 // forceNoPadding is mainly used for compatibility with ARM Compute.
140 // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
141 descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
142 descriptor.m_PadTop = descriptor.m_PadBottom = 0;
143 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
144 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
// Output extents computed with the standard floor formula:
// (in + padBefore + padAfter + stride - pool) / stride.
146 unsigned int inputWidth = 8;
147 unsigned int inputHeight = 13;
148 unsigned int outputWidth =
149 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
150 descriptor.m_StrideX;
151 unsigned int outputHeight =
152 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
153 descriptor.m_StrideY;
154 unsigned int channels = 2;
155 unsigned int batchSize = 2;
// Shapes are NCHW here ({ N, C, H, W }).
157 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
158 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
160 // Set quantization parameters if the requested type is a quantized type.
161 if(armnn::IsQuantizedType<T>())
163 inputTensorInfo.SetQuantizationScale(qScale);
164 inputTensorInfo.SetQuantizationOffset(qOffset);
165 outputTensorInfo.SetQuantizationScale(qScale);
166 outputTensorInfo.SetQuantizationOffset(qOffset);
// One 13x8 channel of source data; reused (and negated) for both channels/batches.
169 std::vector<float> singleChannelData({
170 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
171 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
172 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
173 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
174 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
175 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
176 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
177 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
178 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
179 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
180 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
181 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
182 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
185 // Constructs input data.
186 std::vector<float> inputData;
187 auto negator = [](float f) { return -f; };
189 // First image (two channels where the second channel is the negative of the first one).
190 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
191 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
193 // Second image (same as first image).
194 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
195 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
197 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
199 // These were calculated manually.
200 auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
201 boost::multi_array<T, 4> outputExpected(shape);
// Two expected-output variants follow — NOTE(review): the first assignment's data
// and the selecting condition (presumably forceNoPadding) are elided from this view.
204 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
226 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
228 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
229 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
230 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
232 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
233 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
234 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
236 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
237 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
238 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
240 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
241 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
242 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
247 return SimplePooling2dTestImpl<ArmnnType>(
248 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// Max pooling test: 2x2 pool, stride 2, Exclude padding, on a 1x2x4x4 input,
// parameterised over NCHW/NHWC data layout.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided defaulted parameters; confirm in the full file.
251 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
252 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
253 armnn::IWorkloadFactory& workloadFactory,
254 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
255 const armnn::ITensorHandleFactory& tensorHandleFactory,
256 const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
260 armnn::Pooling2dDescriptor descriptor;
261 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
262 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
263 descriptor.m_StrideX = descriptor.m_StrideY = 2;
264 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
265 descriptor.m_DataLayout = dataLayout;
267 armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
268 armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
270 // Set quantization parameters if the requested type is a quantized type.
271 if(armnn::IsQuantizedType<T>())
273 inputTensorInfo.SetQuantizationScale(qScale);
274 inputTensorInfo.SetQuantizationOffset(qOffset);
275 outputTensorInfo.SetQuantizationScale(qScale);
276 outputTensorInfo.SetQuantizationOffset(qOffset);
// Input/expected data are authored in NCHW order; see the permute below for NHWC.
279 std::vector<T> inputData(
281 1.0f, 2.0f, 5.0f, 6.0f,
282 3.0f, 4.0f, 7.0f, 8.0f,
283 9.0f, 10.0f, 13.0f, 14.0f,
284 11.0f, 12.0f, 15.0f, 16.0f,
286 17.0f, 18.0f, 21.0f, 22.0f,
287 19.0f, 20.0f, 23.0f, 24.0f,
288 25.0f, 26.0f, 29.0f, 30.0f,
289 27.0f, 28.0f, 31.0f, 32.0f,
// NOTE(review): the expected-output literal is elided from this view.
293 std::vector<T> outputData(
// Re-lay the NCHW-authored data out as NHWC when that layout is requested.
303 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
304 if (dataLayout == armnn::DataLayout::NHWC)
306 std::vector<T> tmp(inputData.size());
307 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
310 std::vector<T> tmp1(outputData.size());
311 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
315 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
317 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
319 return SimplePooling2dTestImpl<ArmnnType>(
320 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// Average pooling test: 2x2 pool, stride 2, Exclude padding, on a 1x2x4x4 input,
// parameterised over NCHW/NHWC data layout. Mirrors SimpleMaxPooling2dTestCommon.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided defaulted parameters; confirm in the full file.
323 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
324 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
325 armnn::IWorkloadFactory& workloadFactory,
326 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
327 const armnn::ITensorHandleFactory& tensorHandleFactory,
328 armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
332 armnn::Pooling2dDescriptor descriptor;
333 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
334 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
335 descriptor.m_StrideX = descriptor.m_StrideY = 2;
336 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
337 descriptor.m_DataLayout = dataLayout;
339 armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
340 armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
342 // Set quantization parameters if the requested type is a quantized type.
343 if(armnn::IsQuantizedType<T>())
345 inputTensorInfo.SetQuantizationScale(qScale);
346 inputTensorInfo.SetQuantizationOffset(qOffset);
347 outputTensorInfo.SetQuantizationScale(qScale);
348 outputTensorInfo.SetQuantizationOffset(qOffset);
// Input/expected data are authored in NCHW order; see the permute below for NHWC.
351 std::vector<T> inputData(
353 2.0f, 2.0f, 6.0f, 6.0f,
354 4.0f, 4.0f, 8.0f, 8.0f,
355 10.0f, 12.0f, 14.0f, 16.0f,
356 10.0f, 12.0f, 16.0f, 14.0f,
358 18.0f, 20.0f, 24.0f, 22.0f,
359 20.0f, 18.0f, 22.0f, 24.0f,
360 26.0f, 28.0f, 0.0f, 0.0f,
361 26.0f, 28.0f, 0.0f, 0.0f,
// NOTE(review): the expected-output literal is elided from this view.
365 std::vector<T> outputData(
// Re-lay the NCHW-authored data out as NHWC when that layout is requested.
375 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
376 if (dataLayout == armnn::DataLayout::NHWC)
378 std::vector<T> tmp(inputData.size());
379 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
382 std::vector<T> tmp1(outputData.size());
383 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
387 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
389 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
391 return SimplePooling2dTestImpl<ArmnnType>(
392 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// Average pooling stress test: 100x100 pool with 50-pixel padding on every edge and
// stride 5, over a 5x3x52x60 input of all-ones. With PaddingMethod::Exclude the
// average of any all-ones window is 1, so the expected output is also all ones.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided parameters; confirm in the full file.
395 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
396 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
397 armnn::IWorkloadFactory& workloadFactory,
398 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
399 const armnn::ITensorHandleFactory& tensorHandleFactory,
403 armnn::Pooling2dDescriptor descriptor;
404 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
405 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
406 descriptor.m_StrideX = descriptor.m_StrideY = 5;
407 descriptor.m_PadLeft = 50;
408 descriptor.m_PadRight = 50;
409 descriptor.m_PadTop = 50;
410 descriptor.m_PadBottom = 50;
411 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
413 armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
414 armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
416 // Set quantization parameters if the requested type is a quantized type.
417 if(armnn::IsQuantizedType<T>())
419 inputTensorInfo.SetQuantizationScale(qScale);
420 inputTensorInfo.SetQuantizationOffset(qOffset);
421 outputTensorInfo.SetQuantizationScale(qScale);
422 outputTensorInfo.SetQuantizationOffset(qOffset);
// Fill the entire input with 1s.
425 std::vector<T> inputVec;
427 for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
429 inputVec.push_back(1);
432 auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
// Every output element is expected to be 1 (average of an all-ones window).
434 std::vector<T> outputVec;
436 for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
438 outputVec.push_back(1);
441 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
443 return SimplePooling2dTestImpl<ArmnnType>(
444 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// L2 pooling test: 2x2 pool, stride 2, Exclude padding, on a 1x2x4x4 input,
// parameterised over NCHW/NHWC data layout.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided defaulted parameters; confirm in the full file.
447 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
448 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
449 armnn::IWorkloadFactory& workloadFactory,
450 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
451 const armnn::ITensorHandleFactory& tensorHandleFactory,
452 armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
456 armnn::Pooling2dDescriptor descriptor;
457 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
458 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
459 descriptor.m_StrideX = descriptor.m_StrideY = 2;
460 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
461 descriptor.m_DataLayout = dataLayout;
463 armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
464 armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
// Input/expected data are authored in NCHW order; see the permute below for NHWC.
466 std::vector<T> inputData(
468 1.0f, 7.0f, 5.0f, 5.0f,
469 1.0f, 7.0f, 5.0f, 5.0f,
470 3.0f, 3.0f, 1.0f, 1.0f,
471 3.0f, 3.0f, 1.0f, 1.0f,
473 1.0f, 7.0f, 0.0f, 0.0f,
474 1.0f, 7.0f, 2.0f, 0.0f,
475 0.0f, 2.0f, 1.0f, 1.0f,
476 0.0f, 0.0f, 1.0f, 1.0f,
// NOTE(review): the expected-output literal is elided from this view.
480 std::vector<T> outputData(
// Re-lay the NCHW-authored data out as NHWC when that layout is requested.
490 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
491 if (dataLayout == armnn::DataLayout::NHWC)
493 std::vector<T> tmp(inputData.size());
494 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
497 std::vector<T> tmp1(outputData.size());
498 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
502 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
504 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
506 return SimplePooling2dTestImpl<ArmnnType>(
507 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// L2 pooling test: 3x3 pool with stride 1 (overlapping windows) on a 1x1x4x4 input,
// producing a 1x1x2x2 output.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided parameters; the expected-output literal is also
// elided from this view.
510 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
511 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
512 armnn::IWorkloadFactory& workloadFactory,
513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
514 const armnn::ITensorHandleFactory& tensorHandleFactory,
518 armnn::Pooling2dDescriptor descriptor;
519 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
520 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
521 descriptor.m_StrideX = descriptor.m_StrideY = 1;
522 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
524 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
525 auto input = MakeTensor<T, 4>(inputTensorInfo,
527 2.0f, 1.0f, 5.0f, 2.0f,
528 1.0f, 2.0f, 2.0f, 1.0f,
529 5.0f, 4.0f, 1.0f, 5.0f,
530 2.0f, 1.0f, 5.0f, 2.0f,
534 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
535 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
542 return SimplePooling2dTestImpl<ArmnnType>(
543 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// L2 pooling test: 3x3 pool with stride 3 (non-overlapping tiling) on a 1x1x9x9
// input built from a 3x3-periodic pattern, producing a 1x1x3x3 output.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided parameters; the expected-output literal is also
// elided from this view.
546 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
547 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
548 armnn::IWorkloadFactory& workloadFactory,
549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
550 const armnn::ITensorHandleFactory& tensorHandleFactory,
554 armnn::Pooling2dDescriptor descriptor;
555 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
556 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
557 descriptor.m_StrideX = descriptor.m_StrideY = 3;
558 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
560 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
561 auto input = MakeTensor<T, 4>(inputTensorInfo,
563 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
564 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
565 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
566 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
567 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
568 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
569 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
570 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
571 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
575 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
576 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
584 return SimplePooling2dTestImpl<ArmnnType>(
585 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// L2 pooling test: 3x3 pool with stride 4 (gapped windows — column/row 3 is skipped)
// on a 1x1x7x7 input whose middle row/column are zeros, producing a 1x1x2x2 output.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided parameters; the expected-output literal is also
// elided from this view.
588 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
589 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
590 armnn::IWorkloadFactory& workloadFactory,
591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
592 const armnn::ITensorHandleFactory& tensorHandleFactory,
596 armnn::Pooling2dDescriptor descriptor;
597 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
598 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
599 descriptor.m_StrideX = descriptor.m_StrideY = 4;
600 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
602 armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
603 auto input = MakeTensor<T, 4>(inputTensorInfo,
605 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
606 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
607 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
608 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
609 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
610 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
611 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
615 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
616 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
623 return SimplePooling2dTestImpl<ArmnnType>(
624 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// L2 pooling test: a single 7x7 window covering the whole 1x1x7x7 input, reducing
// it to a 1x1x1x1 output.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided parameters; the expected-output literal is also
// elided from this view.
627 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
628 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
629 armnn::IWorkloadFactory& workloadFactory,
630 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
631 const armnn::ITensorHandleFactory& tensorHandleFactory,
635 armnn::Pooling2dDescriptor descriptor;
636 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
637 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
638 descriptor.m_StrideX = descriptor.m_StrideY = 7;
639 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
641 armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
642 auto input = MakeTensor<T, 4>(inputTensorInfo,
644 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
645 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
646 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
647 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
648 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
649 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
650 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
654 armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
655 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
661 return SimplePooling2dTestImpl<ArmnnType>(
662 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// L2 pooling test: a single 9x9 window covering the whole 1x1x9x9 input (same
// periodic data as the Size3Stride3 test), reducing it to a 1x1x1x1 output.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided parameters; the expected-output literal is also
// elided from this view.
665 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
666 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
667 armnn::IWorkloadFactory& workloadFactory,
668 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
669 const armnn::ITensorHandleFactory& tensorHandleFactory,
673 armnn::Pooling2dDescriptor descriptor;
674 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
675 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
676 descriptor.m_StrideX = descriptor.m_StrideY = 9;
677 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
679 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
680 auto input = MakeTensor<T, 4>(inputTensorInfo,
682 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
683 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
684 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
685 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
686 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
687 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
688 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
689 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
690 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
694 armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
695 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
701 return SimplePooling2dTestImpl<ArmnnType>(
702 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// Max pooling test with a deliberately asymmetric configuration: non-square 2x3
// pool, unequal strides (2, 1) and unequal padding on each edge, on a 1x1x1x3
// input, producing a 1x1x2x2 output.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided parameters; the input literal is elided too.
705 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
706 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
707 armnn::IWorkloadFactory& workloadFactory,
708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
709 const armnn::ITensorHandleFactory& tensorHandleFactory,
713 armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
714 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
716 armnn::Pooling2dDescriptor descriptor;
717 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
718 descriptor.m_PoolWidth = 2;
719 descriptor.m_PoolHeight = 3;
720 descriptor.m_StrideX = 2;
721 descriptor.m_StrideY = 1;
722 descriptor.m_PadLeft = 2;
723 descriptor.m_PadRight = 0;
724 descriptor.m_PadTop = 1;
725 descriptor.m_PadBottom = 2;
726 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
727 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
729 // Construct input data.
730 auto input = MakeTensor<T, 4>(inputTensorInfo,
736 // These were calculated manually.
737 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
739 0.0f, 3.0f, 0.0f, 3.0f,
743 return SimplePooling2dTestImpl<ArmnnType>(
744 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// Comparison test: runs the same pooling workload (type chosen by 'poolingType',
// 3x3 pool, stride 2x4, no padding) on both 'workloadFactory' and
// 'refWorkloadFactory' over a random 5x2x32x16 input, and returns the backend
// output as 'output' and the reference backend's output as 'outputExpected'.
// NOTE(review): qScale/qOffset are used below but not declared in the visible
// signature — presumably elided parameters; confirm in the full file.
747 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
748 LayerTestResult<T, 4> ComparePooling2dTestCommon(
749 armnn::IWorkloadFactory& workloadFactory,
750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
751 armnn::IWorkloadFactory& refWorkloadFactory,
752 const armnn::ITensorHandleFactory& tensorHandleFactory,
753 const armnn::ITensorHandleFactory& refTensorHandleFactory,
754 armnn::PoolingAlgorithm poolingType,
758 IgnoreUnused(memoryManager);
759 const unsigned int inputWidth = 16;
760 const unsigned int inputHeight = 32;
761 const unsigned int channelCount = 2;
762 const unsigned int batchSize = 5;
764 const unsigned int poolSize = 3;
765 const unsigned int strideX = 2;
766 const unsigned int strideY = 4;
767 const unsigned int padX = 0;
768 const unsigned int padY = 0;
// Floor-rounded output extents: (in + 2*pad + stride - pool) / stride.
770 const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
771 const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
773 armnn::TensorInfo inputTensorInfo;
774 armnn::TensorInfo outputTensorInfo;
776 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
777 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
779 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
780 outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
782 // Set quantization parameters if the requested type is a quantized type.
783 if(armnn::IsQuantizedType<T>())
785 inputTensorInfo.SetQuantizationScale(qScale);
786 inputTensorInfo.SetQuantizationOffset(qOffset);
787 outputTensorInfo.SetQuantizationScale(qScale);
788 outputTensorInfo.SetQuantizationOffset(qOffset);
// Fixed seed (81715) keeps the random input reproducible across runs.
791 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
793 LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
795 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
796 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
798 armnn::Pooling2dQueueDescriptor data;
799 armnn::WorkloadInfo info;
800 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
801 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
802 data.m_Parameters.m_PoolType = poolingType;
803 data.m_Parameters.m_PoolWidth = poolSize;
804 data.m_Parameters.m_PoolHeight = poolSize;
805 data.m_Parameters.m_StrideX = strideX;
806 data.m_Parameters.m_StrideY = strideY;
807 data.m_Parameters.m_PadLeft = padX;
808 data.m_Parameters.m_PadRight = padX;
809 data.m_Parameters.m_PadTop = padY;
810 data.m_Parameters.m_PadBottom = padY;
811 data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
813 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
814 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
816 // Don't execute if Pooling is not supported, as an exception will be raised.
817 armnn::BackendId backend = workloadFactory.GetBackendId();
818 const size_t reasonIfUnsupportedMaxLen = 255;
819 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
820 comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
822 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
823 if (!comparisonResult.supported)
825 return comparisonResult;
// The reference workload shares the descriptor but is rebound to the
// reference factory's tensor handles.
828 armnn::Pooling2dQueueDescriptor refData = data;
829 armnn::WorkloadInfo refInfo = info;
830 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
831 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
833 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
834 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
836 outputHandleRef->Allocate();
837 inputHandleRef->Allocate();
838 inputHandle->Allocate();
839 outputHandle->Allocate();
// Both workloads consume an identical copy of the random input.
841 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
842 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
845 workloadRef->Execute();
847 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
848 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
850 return comparisonResult;
854 // Tests max pooling with the following parameters:
// Max pooling test: 2x2 pool, stride 2, optional 3-wide left/right padding, Floor
// rounding, Exclude padding, on a 1x1x4x4 input. The expected output is selected
// by 'forceNoPadding' (padded variant has extra zero columns at the edges).
// NOTE(review): forceNoPadding/qScale/qOffset are used below but not declared in
// the visible signature — presumably elided parameters; confirm in the full file.
862 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
863 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
864 armnn::IWorkloadFactory& workloadFactory,
865 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
866 const armnn::ITensorHandleFactory& tensorHandleFactory,
871 armnn::Pooling2dDescriptor descriptor;
872 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
873 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
874 descriptor.m_StrideX = 2;
875 descriptor.m_StrideY = 2;
876 descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
877 descriptor.m_PadTop = descriptor.m_PadBottom = 0;
878 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
879 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
// Output extents computed with the standard floor formula:
// (in + padBefore + padAfter + stride - pool) / stride.
882 unsigned int inputWidth = 4;
884 unsigned int inputHeight = 4;
886 unsigned int outputWidth =
887 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
888 descriptor.m_StrideX;
889 unsigned int outputHeight =
890 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
891 descriptor.m_StrideY;
892 unsigned int channels = 1;
893 unsigned int batchSize = 1;
895 std::vector<float> inputData = {
896 510.0f, 222.0f, 780.0f, 654.0f,
897 141.0f, 276.0f, 15.0f, 546.0f,
898 303.0f, 618.0f, 582.0f, 339.0f,
899 438.0f, 564.0f, 573.0f, 402.0f
902 // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
903 std::vector<float> expectedOutputDataWithPadding = {
904 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
905 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
// NOTE(review): the no-padding expected-output literal is elided from this view.
908 std::vector<float> expectedOutputDataNoPadding = {
913 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
915 // Scale and offset should match input - we're just calculating maximum values.
916 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
918 // Set quantization parameters if the requested type is a quantized type.
919 if(armnn::IsQuantizedType<T>())
921 inputTensorInfo.SetQuantizationScale(qScale);
922 inputTensorInfo.SetQuantizationOffset(qOffset);
923 outputTensorInfo.SetQuantizationScale(qScale);
924 outputTensorInfo.SetQuantizationOffset(qOffset);
927 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
// Quantize whichever expected-output variant matches the padding mode.
929 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
930 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
931 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
933 return SimplePooling2dTestImpl<ArmnnType>(
934 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
938 // Tests average pooling with the following parameters:
// NOTE(review): the original comment said "max pooling", but the descriptor below
// selects PoolingAlgorithm::Average — this is an average-pooling test.
//
// Pool 3x2, stride 2x2, optional left/right padding of 1 (disabled when
// forceNoPadding is true), PaddingMethod::IgnoreValue (padded elements are
// included in the averaging divisor), Floor output-shape rounding,
// 3x2 input, 1 channel, batch size 1.
946 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
947 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
948 armnn::IWorkloadFactory& workloadFactory,
949 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
950 const armnn::ITensorHandleFactory& tensorHandleFactory,
955 armnn::Pooling2dDescriptor descriptor;
956 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
957 descriptor.m_PoolWidth = 3;
958 descriptor.m_PoolHeight = 2;
959 descriptor.m_StrideX = 2;
960 descriptor.m_StrideY = 2;
// Padding is applied to the left/right edges only; forceNoPadding drops it entirely.
961 descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
962 descriptor.m_PadRight = descriptor.m_PadLeft;
963 descriptor.m_PadTop = 0;
964 descriptor.m_PadBottom = 0;
965 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
966 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
// Standard pooling output-size formula (floor rounding via integer division).
968 unsigned int inputWidth = 3;
969 unsigned int inputHeight = 2;
970 unsigned int outputWidth =
971 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
972 descriptor.m_StrideX;
973 unsigned int outputHeight =
974 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
975 descriptor.m_StrideY;
976 unsigned int channels = 1;
977 unsigned int batchSize = 1;
// (The literal input/expected-output values are elided in this view of the file.)
979 std::vector<float> inputData = {
984 std::vector<float> expectedOutputDataWithPadding = {
988 std::vector<float> expectedOutputDataNoPadding = {
// Tensors are laid out NCHW: { batch, channels, height, width }.
992 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
994 // Scale and offset should match input - we're just calculating average values.
995 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
997 // Set quantization parameters if the requested type is a quantized type.
998 if(armnn::IsQuantizedType<T>())
1000 inputTensorInfo.SetQuantizationScale(qScale);
1001 inputTensorInfo.SetQuantizationOffset(qOffset);
1002 outputTensorInfo.SetQuantizationScale(qScale);
1003 outputTensorInfo.SetQuantizationOffset(qOffset);
1006 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
// The reference output depends on whether padding was requested.
1008 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1009 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1010 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
1012 return SimplePooling2dTestImpl<ArmnnType>(
1013 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 max pooling, stride 2, symmetric padding of 1 on every edge with
// PaddingMethod::IgnoreValue; 4x4 NCHW input -> 3x3 output. qScale/qOffset
// default to identity quantization and are applied only for quantized types.
1017 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1018 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1019 armnn::IWorkloadFactory& workloadFactory,
1020 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1021 const armnn::ITensorHandleFactory& tensorHandleFactory,
1022 float qScale = 1.0f,
1023 int32_t qOffset = 0)
1025 armnn::Pooling2dDescriptor descriptor;
1026 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1027 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1028 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1029 descriptor.m_PadLeft = 1;
1030 descriptor.m_PadRight = 1;
1031 descriptor.m_PadTop = 1;
1032 descriptor.m_PadBottom = 1;
1033 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1035 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1036 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1038 // Set quantization parameters if the requested type is a quantized type.
1039 if(armnn::IsQuantizedType<T>())
1041 inputTensorInfo.SetQuantizationScale(qScale);
1042 inputTensorInfo.SetQuantizationOffset(qOffset);
1043 outputTensorInfo.SetQuantizationScale(qScale);
1044 outputTensorInfo.SetQuantizationOffset(qOffset);
1047 auto input = MakeTensor<T, 4>(inputTensorInfo,
1048 QuantizedVector<T>({
1049 -1.0f, -2.0f, 3.0f, 4.0f,
1050 -1.0f, -2.0f, 3.0f, 4.0f,
1051 1.0f, 2.0f, -3.0f, -4.0f,
1052 1.0f, 2.0f, -3.0f, -4.0f,
// (The expected-output literal values are elided in this view of the file.)
1056 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1057 QuantizedVector<T>({
1064 return SimplePooling2dTestImpl<ArmnnType>(
1065 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 max pooling, stride 1, padding of 1 on every edge with
// PaddingMethod::IgnoreValue; 4x4 NCHW input -> 4x4 output (same size).
1068 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1069 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1070 armnn::IWorkloadFactory& workloadFactory,
1071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1072 const armnn::ITensorHandleFactory& tensorHandleFactory,
1073 float qScale = 1.0f,
1074 int32_t qOffset = 0)
1076 armnn::Pooling2dDescriptor descriptor;
1077 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1078 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1079 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1080 descriptor.m_PadLeft = 1;
1081 descriptor.m_PadRight = 1;
1082 descriptor.m_PadTop = 1;
1083 descriptor.m_PadBottom = 1;
1084 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1086 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1087 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1089 // Set quantization parameters if the requested type is a quantized type.
1090 if(armnn::IsQuantizedType<T>())
1092 inputTensorInfo.SetQuantizationScale(qScale);
1093 inputTensorInfo.SetQuantizationOffset(qOffset);
1094 outputTensorInfo.SetQuantizationScale(qScale);
1095 outputTensorInfo.SetQuantizationOffset(qOffset);
1098 auto input = MakeTensor<T, 4>(inputTensorInfo,
1099 QuantizedVector<T>({
1100 -1.0f, -2.0f, 3.0f, 4.0f,
1101 -1.0f, -2.0f, 3.0f, 4.0f,
1102 1.0f, 2.0f, -3.0f, -4.0f,
1103 1.0f, 2.0f, -3.0f, -4.0f,
// Each output element is the max over a 3x3 window centred on the input element
// (padded positions contribute nothing to a max).
1107 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1108 QuantizedVector<T>({
1109 -1.0f, 3.0f, 4.0f, 4.0f,
1110 2.0f, 3.0f, 4.0f, 4.0f,
1111 2.0f, 3.0f, 4.0f, 4.0f,
1112 2.0f, 2.0f, 2.0f, -3.0f,
1116 return SimplePooling2dTestImpl<ArmnnType>(
1117 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 average pooling, stride 2, padding of 1 on every edge with
// PaddingMethod::IgnoreValue (padded zeros count towards the divisor);
// 4x4 NCHW input -> 3x3 output.
1120 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1121 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1122 armnn::IWorkloadFactory& workloadFactory,
1123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1124 const armnn::ITensorHandleFactory& tensorHandleFactory,
1125 float qScale = 1.0f,
1126 int32_t qOffset = 0)
1128 armnn::Pooling2dDescriptor descriptor;
1129 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1130 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1131 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1132 descriptor.m_PadLeft = 1;
1133 descriptor.m_PadRight = 1;
1134 descriptor.m_PadTop = 1;
1135 descriptor.m_PadBottom = 1;
1136 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1138 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1139 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1141 // Set quantization parameters if the requested type is a quantized type.
1142 if(armnn::IsQuantizedType<T>())
1144 inputTensorInfo.SetQuantizationScale(qScale);
1145 inputTensorInfo.SetQuantizationOffset(qOffset);
1146 outputTensorInfo.SetQuantizationScale(qScale);
1147 outputTensorInfo.SetQuantizationOffset(qOffset);
1150 auto input = MakeTensor<T, 4>(inputTensorInfo,
1151 QuantizedVector<T>({
1152 12.0f, 20.0f, 32.0f, 40.0f,
1153 12.0f, 20.0f, 32.0f, 40.0f,
1154 12.0f, 20.0f, 32.0f, 40.0f,
1155 12.0f, 20.0f, 32.0f, 40.0f,
// (The expected-output literal values are elided in this view of the file.)
1159 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1160 QuantizedVector<T>({
1167 return SimplePooling2dTestImpl<ArmnnType>(
1168 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 average pooling, stride 2, NO padding, with Ceiling output-shape
// rounding; 4x4 NCHW input -> 2x2 output (ceil((4-3)/2)+1 = 2).
1171 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1172 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1173 armnn::IWorkloadFactory& workloadFactory,
1174 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1175 const armnn::ITensorHandleFactory& tensorHandleFactory,
1176 float qScale = 1.0f,
1177 int32_t qOffset = 0)
1179 armnn::Pooling2dDescriptor descriptor;
1180 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1181 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1182 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1183 descriptor.m_PadLeft = 0;
1184 descriptor.m_PadRight = 0;
1185 descriptor.m_PadTop = 0;
1186 descriptor.m_PadBottom = 0;
// IgnoreValue with zero padding is effectively a no-op; the interesting part
// of this test is the Ceiling rounding below.
1187 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1188 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
1190 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1191 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
1193 // Set quantization parameters if the requested type is a quantized type.
1194 if(armnn::IsQuantizedType<T>())
1196 inputTensorInfo.SetQuantizationScale(qScale);
1197 inputTensorInfo.SetQuantizationOffset(qOffset);
1198 outputTensorInfo.SetQuantizationScale(qScale);
1199 outputTensorInfo.SetQuantizationOffset(qOffset);
1202 auto input = MakeTensor<T, 4>(inputTensorInfo,
1203 QuantizedVector<T>({
1204 1.0f, 2.0f, 3.0f, 4.0f,
1205 1.0f, 2.0f, 3.0f, 4.0f,
1206 1.0f, 2.0f, 3.0f, 4.0f,
1207 1.0f, 2.0f, 3.0f, 4.0f,
// (The expected-output literal values are elided in this view of the file.)
1211 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1212 QuantizedVector<T>({
1218 return SimplePooling2dTestImpl<ArmnnType>(
1219 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 average pooling, stride 1, padding of 1 on every edge with
// PaddingMethod::IgnoreValue; 4x4 NCHW input -> 4x4 output. Padded zeros
// count towards the divisor of 9, e.g. top-left output 7 = (9+27+18+9)/9.
1222 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1223 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1224 armnn::IWorkloadFactory& workloadFactory,
1225 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1226 const armnn::ITensorHandleFactory& tensorHandleFactory,
1227 float qScale = 1.0f,
1228 int32_t qOffset = 0)
1230 armnn::Pooling2dDescriptor descriptor;
1231 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1232 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1233 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1234 descriptor.m_PadLeft = 1;
1235 descriptor.m_PadRight = 1;
1236 descriptor.m_PadTop = 1;
1237 descriptor.m_PadBottom = 1;
1238 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1240 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1241 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1243 // Set quantization parameters if the requested type is a quantized type.
1244 if(armnn::IsQuantizedType<T>())
1246 inputTensorInfo.SetQuantizationScale(qScale);
1247 inputTensorInfo.SetQuantizationOffset(qOffset);
1248 outputTensorInfo.SetQuantizationScale(qScale);
1249 outputTensorInfo.SetQuantizationOffset(qOffset);
1252 auto input = MakeTensor<T, 4>(inputTensorInfo,
1253 QuantizedVector<T>({
1254 9.0f, 27.0f, 18.0f, 36.0f,
1255 18.0f, 9.0f, 18.0f, 9.0f,
1256 27.0f, 18.0f, 9.0f, 27.0f,
1257 9.0f, 27.0f, 9.0f, 18.0f,
1261 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1262 QuantizedVector<T>({
1263 7.0f, 11.0f, 13.0f, 9.0f,
1264 12.0f, 17.0f, 19.0f, 13.0f,
1265 12.0f, 16.0f, 16.0f, 10.0f,
1266 9.0f, 11.0f, 12.0f, 7.0f,
1270 return SimplePooling2dTestImpl<ArmnnType>(
1271 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 L2 pooling (sqrt of the mean of squares), stride 2, padding of 1 on
// every edge with PaddingMethod::IgnoreValue; 4x4 NCHW input -> 3x3 output.
// Padded zeros count towards the divisor, e.g. top-left 1.0 = sqrt(2^2 / 4).
1274 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1275 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1276 armnn::IWorkloadFactory& workloadFactory,
1277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1278 const armnn::ITensorHandleFactory& tensorHandleFactory,
1279 float qScale = 1.0f,
1280 int32_t qOffset = 0)
1282 armnn::Pooling2dDescriptor descriptor;
1283 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1284 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1285 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1286 descriptor.m_PadLeft = 1;
1287 descriptor.m_PadRight = 1;
1288 descriptor.m_PadTop = 1;
1289 descriptor.m_PadBottom = 1;
1290 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1292 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1293 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1295 // Set quantization parameters if the requested type is a quantized type.
1296 if(armnn::IsQuantizedType<T>())
1298 inputTensorInfo.SetQuantizationScale(qScale);
1299 inputTensorInfo.SetQuantizationOffset(qOffset);
1300 outputTensorInfo.SetQuantizationScale(qScale);
1301 outputTensorInfo.SetQuantizationOffset(qOffset);
1304 auto input = MakeTensor<T, 4>(inputTensorInfo,
1305 QuantizedVector<T>({
1306 2.0f, 4.0f, 8.0f, 16.0f,
1307 4.0f, 2.0f, 2.0f, 4.0f,
1308 8.0f, 2.0f, 4.0f, 2.0f,
1309 16.0f, 2.0f, 2.0f, 8.0f,
// Expected values are truncated decimal approximations (e.g. 4.4721 ~ sqrt(20)).
1313 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1314 QuantizedVector<T>({
1315 1.0f, 4.4721f, 8.0f,
1316 4.4721f, 2.6457f, 2.236f,
1317 8.0f, 1.4142f, 4.0f,
1321 return SimplePooling2dTestImpl<ArmnnType>(
1322 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 L2 pooling (sqrt of the mean of squares), stride 1, padding of 1 on
// every edge with PaddingMethod::IgnoreValue; 4x4 NCHW input -> 4x4 output.
// Padded zeros count towards the divisor of 9, e.g. 1.0540 ~ sqrt(10/9).
1325 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1326 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1327 armnn::IWorkloadFactory& workloadFactory,
1328 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1329 const armnn::ITensorHandleFactory& tensorHandleFactory,
1330 float qScale = 1.0f,
1331 int32_t qOffset = 0)
1333 armnn::Pooling2dDescriptor descriptor;
1334 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1335 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1336 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1337 descriptor.m_PadLeft = 1;
1338 descriptor.m_PadRight = 1;
1339 descriptor.m_PadTop = 1;
1340 descriptor.m_PadBottom = 1;
1341 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1343 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1344 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1346 // Set quantization parameters if the requested type is a quantized type.
1347 if(armnn::IsQuantizedType<T>())
1349 inputTensorInfo.SetQuantizationScale(qScale);
1350 inputTensorInfo.SetQuantizationOffset(qOffset);
1351 outputTensorInfo.SetQuantizationScale(qScale);
1352 outputTensorInfo.SetQuantizationOffset(qOffset);
1355 auto input = MakeTensor<T, 4>(inputTensorInfo,
1356 QuantizedVector<T>({
1357 1.0f, 2.0f, 3.0f, 4.0f,
1358 1.0f, 2.0f, 3.0f, 4.0f,
1359 1.0f, 2.0f, 3.0f, 4.0f,
1360 1.0f, 2.0f, 3.0f, 4.0f,
// Output is symmetric top/bottom because the input rows are identical.
1364 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1365 QuantizedVector<T>({
1366 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1367 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1368 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1369 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1373 return SimplePooling2dTestImpl<ArmnnType>(
1374 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1377 } // anonymous namespace
// Public entry points dispatching SimpleMaxPooling2dSize2x2Stride2x2TestCommon
// for Float32, QAsymmU8 (scale 3.0, offset -5) and QSymmS16.
1379 LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
1380 armnn::IWorkloadFactory& workloadFactory,
1381 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1382 const armnn::ITensorHandleFactory& tensorHandleFactory,
1383 bool forceNoPadding)
1385 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1386 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1389 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
1390 armnn::IWorkloadFactory& workloadFactory,
1391 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1392 const armnn::ITensorHandleFactory& tensorHandleFactory,
1393 bool forceNoPadding)
1395 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1396 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 3.0f, -5);
1399 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
1400 armnn::IWorkloadFactory& workloadFactory,
1401 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1402 const armnn::ITensorHandleFactory& tensorHandleFactory,
1403 bool forceNoPadding)
1405 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1406 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
// Public entry points dispatching SimpleMaxPooling2dSize3x3Stride2x4TestCommon
// for Float32, QAsymmU8 (scale 0.1, offset 128) and QSymmS16.
1409 LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
1410 armnn::IWorkloadFactory& workloadFactory,
1411 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1412 const armnn::ITensorHandleFactory& tensorHandleFactory,
1413 bool forceNoPadding)
1415 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1416 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1419 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
1420 armnn::IWorkloadFactory& workloadFactory,
1421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1422 const armnn::ITensorHandleFactory& tensorHandleFactory,
1423 bool forceNoPadding)
1425 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1426 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 0.1f, 128);
1429 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
1430 armnn::IWorkloadFactory& workloadFactory,
1431 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1432 const armnn::ITensorHandleFactory& tensorHandleFactory,
1433 bool forceNoPadding)
1435 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1436 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
// Public entry points dispatching SimpleMaxPooling2dTestCommon (data layout
// selectable by the caller) for Float32, QAsymmU8 and QSymmS16.
1439 LayerTestResult<float, 4> SimpleMaxPooling2dTest(
1440 armnn::IWorkloadFactory& workloadFactory,
1441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1442 const armnn::ITensorHandleFactory& tensorHandleFactory,
1443 const armnn::DataLayout dataLayout)
1445 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1446 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1449 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
1450 armnn::IWorkloadFactory& workloadFactory,
1451 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1452 const armnn::ITensorHandleFactory& tensorHandleFactory,
1453 const armnn::DataLayout dataLayout)
1455 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1456 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1459 LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
1460 armnn::IWorkloadFactory& workloadFactory,
1461 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1462 const armnn::ITensorHandleFactory& tensorHandleFactory,
1463 const armnn::DataLayout dataLayout)
1465 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1466 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
// Public entry points dispatching IgnorePaddingSimpleMaxPooling2dTestCommon
// for Float32, QAsymmU8 (scale 1.0, offset -5) and QSymmS16.
1468 LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
1469 armnn::IWorkloadFactory& workloadFactory,
1470 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1471 const armnn::ITensorHandleFactory& tensorHandleFactory)
1473 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1474 workloadFactory, memoryManager, tensorHandleFactory);
1477 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
1478 armnn::IWorkloadFactory& workloadFactory,
1479 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1480 const armnn::ITensorHandleFactory& tensorHandleFactory)
1482 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1483 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1486 LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
1487 armnn::IWorkloadFactory& workloadFactory,
1488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1489 const armnn::ITensorHandleFactory& tensorHandleFactory)
1491 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1492 workloadFactory, memoryManager, tensorHandleFactory);
// Public entry points dispatching IgnorePaddingMaxPooling2dSize3TestCommon
// for Float32, QAsymmU8 (scale 1.0, offset -5) and QSymmS16.
1495 LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
1496 armnn::IWorkloadFactory& workloadFactory,
1497 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1498 const armnn::ITensorHandleFactory& tensorHandleFactory)
1500 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(
1501 workloadFactory, memoryManager, tensorHandleFactory);
1504 LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
1505 armnn::IWorkloadFactory& workloadFactory,
1506 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1507 const armnn::ITensorHandleFactory& tensorHandleFactory)
1509 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1510 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1513 LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
1514 armnn::IWorkloadFactory& workloadFactory,
1515 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1516 const armnn::ITensorHandleFactory& tensorHandleFactory)
1518 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1519 workloadFactory, memoryManager, tensorHandleFactory);
// Public entry points dispatching SimpleAveragePooling2dTestCommon (data
// layout selectable) for Float32, QAsymmU8 (scale 0.5, offset -1) and QSymmS16.
1522 LayerTestResult<float, 4> SimpleAveragePooling2dTest(
1523 armnn::IWorkloadFactory& workloadFactory,
1524 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1525 const armnn::ITensorHandleFactory& tensorHandleFactory,
1526 const armnn::DataLayout dataLayout)
1528 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1529 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1532 LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
1533 armnn::IWorkloadFactory& workloadFactory,
1534 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1535 const armnn::ITensorHandleFactory& tensorHandleFactory,
1536 const armnn::DataLayout dataLayout)
1538 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1539 workloadFactory, memoryManager, tensorHandleFactory, dataLayout, 0.5, -1);
1542 LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
1543 armnn::IWorkloadFactory& workloadFactory,
1544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1545 const armnn::ITensorHandleFactory& tensorHandleFactory,
1546 const armnn::DataLayout dataLayout)
1548 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1549 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
// Float32-only public entry point for the 3x2-pool / 2x2-stride average
// pooling test; forceNoPadding toggles the left/right padding of 1.
1552 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
1553 armnn::IWorkloadFactory& workloadFactory,
1554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1555 const armnn::ITensorHandleFactory& tensorHandleFactory,
1556 bool forceNoPadding)
1558 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1559 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
// Public entry points dispatching LargeTensorsAveragePooling2dTestCommon
// for Float32, QAsymmU8 (scale 0.5, offset -1) and QSymmS16.
1562 LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
1563 armnn::IWorkloadFactory& workloadFactory,
1564 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1565 const armnn::ITensorHandleFactory& tensorHandleFactory)
1567 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(
1568 workloadFactory, memoryManager, tensorHandleFactory);
1571 LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
1572 armnn::IWorkloadFactory& workloadFactory,
1573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1574 const armnn::ITensorHandleFactory& tensorHandleFactory)
1576 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1577 workloadFactory, memoryManager, tensorHandleFactory, 0.5, -1);
1580 LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
1581 armnn::IWorkloadFactory& workloadFactory,
1582 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1583 const armnn::ITensorHandleFactory& tensorHandleFactory)
1585 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1586 workloadFactory, memoryManager, tensorHandleFactory);
// Public entry points dispatching IgnorePaddingSimpleAveragePooling2dTestCommon
// for Float32, QAsymmU8 and QSymmS16 (default quantization parameters).
1588 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
1589 armnn::IWorkloadFactory& workloadFactory,
1590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1591 const armnn::ITensorHandleFactory& tensorHandleFactory)
1593 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1594 workloadFactory, memoryManager, tensorHandleFactory);
1597 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
1598 armnn::IWorkloadFactory& workloadFactory,
1599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1600 const armnn::ITensorHandleFactory& tensorHandleFactory)
1602 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1603 workloadFactory, memoryManager, tensorHandleFactory);
1606 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
1607 armnn::IWorkloadFactory& workloadFactory,
1608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1609 const armnn::ITensorHandleFactory& tensorHandleFactory)
1611 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1612 workloadFactory, memoryManager, tensorHandleFactory);
// Public entry points dispatching
// IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon for Float32,
// QAsymmU8 and QSymmS16 (default quantization parameters).
1615 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1618 const armnn::ITensorHandleFactory& tensorHandleFactory)
1620 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1621 workloadFactory, memoryManager, tensorHandleFactory);
1624 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
1625 armnn::IWorkloadFactory& workloadFactory,
1626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1627 const armnn::ITensorHandleFactory& tensorHandleFactory)
1629 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1630 workloadFactory, memoryManager, tensorHandleFactory);
1633 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
1634 armnn::IWorkloadFactory& workloadFactory,
1635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1636 const armnn::ITensorHandleFactory& tensorHandleFactory)
1638 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1639 workloadFactory, memoryManager, tensorHandleFactory);
// Public entry points dispatching IgnorePaddingAveragePooling2dSize3TestCommon
// for Float32, QAsymmU8 and QSymmS16 (default quantization parameters).
1642 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
1643 armnn::IWorkloadFactory& workloadFactory,
1644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1645 const armnn::ITensorHandleFactory& tensorHandleFactory)
1647 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(
1648 workloadFactory, memoryManager, tensorHandleFactory);
1651 LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
1652 armnn::IWorkloadFactory& workloadFactory,
1653 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1654 const armnn::ITensorHandleFactory& tensorHandleFactory)
1656 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1657 workloadFactory, memoryManager, tensorHandleFactory);
1660 LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
1661 armnn::IWorkloadFactory& workloadFactory,
1662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1663 const armnn::ITensorHandleFactory& tensorHandleFactory)
1665 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1666 workloadFactory, memoryManager, tensorHandleFactory);
// Public entry points dispatching SimpleL2Pooling2dTestCommon (data layout
// selectable) for Float32, QAsymmU8 and QSymmS16.
1669 LayerTestResult<float, 4> SimpleL2Pooling2dTest(
1670 armnn::IWorkloadFactory& workloadFactory,
1671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1672 const armnn::ITensorHandleFactory& tensorHandleFactory,
1673 const armnn::DataLayout dataLayout)
1675 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1676 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1679 LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
1680 armnn::IWorkloadFactory& workloadFactory,
1681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1682 const armnn::ITensorHandleFactory& tensorHandleFactory,
1683 const armnn::DataLayout dataLayout)
1685 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1686 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1689 LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
1690 armnn::IWorkloadFactory& workloadFactory,
1691 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1692 const armnn::ITensorHandleFactory& tensorHandleFactory,
1693 const armnn::DataLayout dataLayout)
1695 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1696 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
// Public entry points dispatching L2Pooling2dSize3Stride1TestCommon
// for Float32, QAsymmU8 and QSymmS16 (default quantization parameters).
1699 LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
1700 armnn::IWorkloadFactory& workloadFactory,
1701 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1702 const armnn::ITensorHandleFactory& tensorHandleFactory)
1704 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(
1705 workloadFactory, memoryManager, tensorHandleFactory);
1708 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
1709 armnn::IWorkloadFactory& workloadFactory,
1710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1711 const armnn::ITensorHandleFactory& tensorHandleFactory)
1713 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(
1714 workloadFactory, memoryManager, tensorHandleFactory);
1717 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
1718 armnn::IWorkloadFactory& workloadFactory,
1719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1720 const armnn::ITensorHandleFactory& tensorHandleFactory)
1722 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(
1723 workloadFactory, memoryManager, tensorHandleFactory);
// Public entry points dispatching L2Pooling2dSize3Stride3TestCommon
// for Float32, QAsymmU8 and QSymmS16 (default quantization parameters).
1726 LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
1727 armnn::IWorkloadFactory& workloadFactory,
1728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1729 const armnn::ITensorHandleFactory& tensorHandleFactory)
1731 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(
1732 workloadFactory, memoryManager, tensorHandleFactory);
1735 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
1736 armnn::IWorkloadFactory& workloadFactory,
1737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1738 const armnn::ITensorHandleFactory& tensorHandleFactory)
1740 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(
1741 workloadFactory, memoryManager, tensorHandleFactory);
1744 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
1745 armnn::IWorkloadFactory& workloadFactory,
1746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1747 const armnn::ITensorHandleFactory& tensorHandleFactory)
1749 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(
1750 workloadFactory, memoryManager, tensorHandleFactory);
// Float32 instantiation of the 3x3, stride-4 L2 pooling test.
1752 LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
1753     armnn::IWorkloadFactory& workloadFactory,
1754     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1755     const armnn::ITensorHandleFactory& tensorHandleFactory)
1757     return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(
1758         workloadFactory, memoryManager, tensorHandleFactory);
// QAsymmU8 (uint8) instantiation of the 3x3, stride-4 L2 pooling test.
1761 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
1762     armnn::IWorkloadFactory& workloadFactory,
1763     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1764     const armnn::ITensorHandleFactory& tensorHandleFactory)
1766     return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(
1767         workloadFactory, memoryManager, tensorHandleFactory);
// QSymmS16 (int16) instantiation of the 3x3, stride-4 L2 pooling test.
1770 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
1771     armnn::IWorkloadFactory& workloadFactory,
1772     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1773     const armnn::ITensorHandleFactory& tensorHandleFactory)
1775     return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(
1776         workloadFactory, memoryManager, tensorHandleFactory);
// Float32 instantiation of the 7x7 L2 pooling test.
1779 LayerTestResult<float, 4> L2Pooling2dSize7Test(
1780     armnn::IWorkloadFactory& workloadFactory,
1781     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1782     const armnn::ITensorHandleFactory& tensorHandleFactory)
1784     return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(
1785         workloadFactory, memoryManager, tensorHandleFactory);
// QAsymmU8 (uint8) instantiation of the 7x7 L2 pooling test.
1788 LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
1789     armnn::IWorkloadFactory& workloadFactory,
1790     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1791     const armnn::ITensorHandleFactory& tensorHandleFactory)
1793     return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(
1794         workloadFactory, memoryManager, tensorHandleFactory);
// QSymmS16 (int16) instantiation of the 7x7 L2 pooling test.
1797 LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
1798     armnn::IWorkloadFactory& workloadFactory,
1799     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1800     const armnn::ITensorHandleFactory& tensorHandleFactory)
1802     return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(
1803         workloadFactory, memoryManager, tensorHandleFactory);
// Float32 instantiation of the 9x9 L2 pooling test.
1806 LayerTestResult<float, 4> L2Pooling2dSize9Test(
1807     armnn::IWorkloadFactory& workloadFactory,
1808     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1809     const armnn::ITensorHandleFactory& tensorHandleFactory)
1811     return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(
1812         workloadFactory, memoryManager, tensorHandleFactory);
// QAsymmU8 (uint8) instantiation of the 9x9 L2 pooling test.
1815 LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
1816     armnn::IWorkloadFactory& workloadFactory,
1817     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1818     const armnn::ITensorHandleFactory& tensorHandleFactory)
1820     return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(
1821         workloadFactory, memoryManager, tensorHandleFactory);
// QSymmS16 (int16) instantiation of the 9x9 L2 pooling test.
1824 LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
1825     armnn::IWorkloadFactory& workloadFactory,
1826     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1827     const armnn::ITensorHandleFactory& tensorHandleFactory)
1829     return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(
1830         workloadFactory, memoryManager, tensorHandleFactory);
// Float32 instantiation of the simple L2 pooling test with PaddingMethod::IgnoreValue.
1832 LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
1833     armnn::IWorkloadFactory& workloadFactory,
1834     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1835     const armnn::ITensorHandleFactory& tensorHandleFactory)
1837     return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1838         workloadFactory, memoryManager, tensorHandleFactory);
// QAsymmU8 (uint8) instantiation of the simple ignore-padding L2 pooling test.
1841 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
1842     armnn::IWorkloadFactory& workloadFactory,
1843     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1844     const armnn::ITensorHandleFactory& tensorHandleFactory)
1846     return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1847         workloadFactory, memoryManager, tensorHandleFactory);
// QSymmS16 (int16) instantiation of the simple ignore-padding L2 pooling test.
1850 LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
1851     armnn::IWorkloadFactory& workloadFactory,
1852     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1853     const armnn::ITensorHandleFactory& tensorHandleFactory)
1855     return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1856         workloadFactory, memoryManager, tensorHandleFactory);
// Float32 instantiation of the 3x3 ignore-padding L2 pooling test.
1859 LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
1860     armnn::IWorkloadFactory& workloadFactory,
1861     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1862     const armnn::ITensorHandleFactory& tensorHandleFactory)
1864     return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(
1865         workloadFactory, memoryManager, tensorHandleFactory);
// QAsymmU8 (uint8) instantiation of the 3x3 ignore-padding L2 pooling test.
1868 LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
1869     armnn::IWorkloadFactory& workloadFactory,
1870     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1871     const armnn::ITensorHandleFactory& tensorHandleFactory)
1873     return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1874         workloadFactory, memoryManager, tensorHandleFactory);
// QSymmS16 (int16) instantiation of the 3x3 ignore-padding L2 pooling test.
1877 LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
1878     armnn::IWorkloadFactory& workloadFactory,
1879     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1880     const armnn::ITensorHandleFactory& tensorHandleFactory)
1882     return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1883         workloadFactory, memoryManager, tensorHandleFactory);
// Float32 instantiation of the asymmetric, non-square pooling test.
1886 LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
1887     armnn::IWorkloadFactory& workloadFactory,
1888     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1889     const armnn::ITensorHandleFactory& tensorHandleFactory)
1891     return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(
1892         workloadFactory, memoryManager, tensorHandleFactory);
// QAsymmU8 (uint8) instantiation of the asymmetric, non-square pooling test.
1895 LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
1896     armnn::IWorkloadFactory& workloadFactory,
1897     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1898     const armnn::ITensorHandleFactory& tensorHandleFactory)
1900     return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1901         workloadFactory, memoryManager, tensorHandleFactory);
// QSymmS16 (int16) instantiation of the asymmetric, non-square pooling test.
1904 LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
1905     armnn::IWorkloadFactory& workloadFactory,
1906     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1907     const armnn::ITensorHandleFactory& tensorHandleFactory)
1909     return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(
1910         workloadFactory, memoryManager, tensorHandleFactory);
// Float32 comparison test: runs the given pooling algorithm on the backend under
// test and on the reference backend, comparing the two outputs.
1913 LayerTestResult<float, 4> ComparePooling2dTest(
1914     armnn::IWorkloadFactory& workloadFactory,
1915     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1916     armnn::IWorkloadFactory& refWorkloadFactory,
1917     const armnn::ITensorHandleFactory& tensorHandleFactory,
1918     const armnn::ITensorHandleFactory& refTensorHandleFactory,
1919     armnn::PoolingAlgorithm poolingType)
1921     return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1922         workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
// QAsymmU8 (uint8) comparison test against the reference backend. Unlike the
// Float32/Int16 variants, this one passes explicit quantization parameters
// (scale 0.1f, zero-point 128) to the common helper.
1925 LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
1926     armnn::IWorkloadFactory& workloadFactory,
1927     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1928     armnn::IWorkloadFactory& refWorkloadFactory,
1929     const armnn::ITensorHandleFactory& tensorHandleFactory,
1930     const armnn::ITensorHandleFactory& refTensorHandleFactory,
1931     armnn::PoolingAlgorithm poolingType)
1933     return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1934         workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
1935         poolingType, 0.1f, 128);
// QSymmS16 (int16) comparison test against the reference backend; relies on the
// common helper's default quantization parameters.
1938 LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
1939     armnn::IWorkloadFactory& workloadFactory,
1940     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1941     armnn::IWorkloadFactory& refWorkloadFactory,
1942     const armnn::ITensorHandleFactory& tensorHandleFactory,
1943     const armnn::ITensorHandleFactory& refTensorHandleFactory,
1944     armnn::PoolingAlgorithm poolingType)
1946     return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1947         workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);