//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"

#include "QuantizeHelper.hpp"

#include <armnn/ArmNN.hpp>

#include <Permute.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/WorkloadInfo.hpp>

#include <test/TensorHelpers.hpp>

#include <DataLayoutIndexed.hpp>

#include <boost/numeric/conversion/cast.hpp>

#include <algorithm>
#include <iterator>
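
// Helper shared by the pooling tests below: it creates a Pooling2d workload from the given
// descriptor on the supplied workload factory, copies the input in, executes the workload and
// returns the actual and expected outputs for comparison. The tensor layout (NCHW or NHWC) is
// taken from descriptor.m_DataLayout.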
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::Pooling2dDescriptor descriptor,
    float qScale,
    int32_t qOffset,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& outputExpected)
{
    const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
    const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
    auto heightIndex = dimensionIndices.GetHeightIndex();
    auto widthIndex = dimensionIndices.GetWidthIndex();
    auto channelsIndex = dimensionIndices.GetChannelsIndex();

    unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
    unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
    unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
    unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
        inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
        outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}

//
// Tests max pooling with the following parameters:
//
//   Pooling size: 3x3
//   Stride:       (2,4)
//   input size:   8x13
//   channels:     2
//   batch size:   2
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    // forceNoPadding is mainly used for compatibility with ARM Compute.
    // As of 16/05/2017, ARM Compute errors if padX or padY is equal to or greater than the pool size.
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 8;
    unsigned int inputHeight = 13;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
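    // With the sizes used here this gives a 6x3 output when padding is applied
    // ((8 + 3 + 3 + 2 - 3) / 2 = 6 wide, (13 + 0 + 0 + 4 - 3) / 4 = 3 high) and a
    // 3x3 output when forceNoPadding is set.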
    unsigned int channels = 2;
    unsigned int batchSize = 2;

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<float> singleChannelData({
        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
    });

    // Constructs input data.
    std::vector<float> inputData;
    auto negator = [](float f) { return -f; };

    // First image (two channels where the second channel is the negative of the first one).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    // Second image (same as the first image).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    // These were calculated manually.
    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
    boost::multi_array<T, 4> outputExpected(shape);
    if (forceNoPadding)
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>(qScale, qOffset, {
                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f,

                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f
            }));
    }
    else
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>(qScale, qOffset, {
                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f,

                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f
            }));
    }

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
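
// Max pooling 2x2 with stride 2 over a 1x2x4x4 input, checked in both the NCHW and NHWC layouts.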
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 5.0f, 6.0f,
            3.0f, 4.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 13.0f, 14.0f,
            11.0f, 12.0f, 15.0f, 16.0f,

            17.0f, 18.0f, 21.0f, 22.0f,
            19.0f, 20.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 29.0f, 30.0f,
            27.0f, 28.0f, 31.0f, 32.0f,
        }));

    std::vector<T> outputData(
        QuantizedVector<T>(qScale, qOffset, {
            4.0f, 8.0f,
            12.0f, 16.0f,

            20.0f, 24.0f,
            28.0f, 32.0f,
        }));
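    // The input and expected output above are written in NCHW order; when the test runs in NHWC
    // they are permuted to match ({ 0, 3, 1, 2 } sends each NCHW dimension to its NHWC position).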
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
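
// Average pooling 2x2 with stride 2 over a 1x2x4x4 input, checked in both the NCHW and NHWC layouts.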
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 2.0f, 6.0f, 6.0f,
            4.0f, 4.0f, 8.0f, 8.0f,
            10.0f, 12.0f, 14.0f, 16.0f,
            10.0f, 12.0f, 16.0f, 14.0f,

            18.0f, 20.0f, 24.0f, 22.0f,
            20.0f, 18.0f, 22.0f, 24.0f,
            26.0f, 28.0f, 0.0f, 0.0f,
            26.0f, 28.0f, 0.0f, 0.0f,
        }));

    std::vector<T> outputData(
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 7.0f,
            11.0f, 15.0f,

            19.0f, 23.0f,
            27.0f, 0.0f,
        }));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
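
// Average pooling with a 100x100 pool, 50-element padding and stride 5 over a 5x3x52x60 input.
// Every input element is 1, so with PaddingMethod::Exclude every average is also 1; this mainly
// exercises the large-kernel paths in the backends.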
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
    descriptor.m_StrideX = descriptor.m_StrideY = 5;
    descriptor.m_PadLeft = 50;
    descriptor.m_PadRight = 50;
    descriptor.m_PadTop = 50;
    descriptor.m_PadBottom = 50;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputVec;

    for (unsigned int i = 0; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        inputVec.push_back(1);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);

    std::vector<T> outputVec;

    for (unsigned int i = 0; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        outputVec.push_back(1);
    }

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
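
// L2 pooling 2x2 with stride 2 over a 1x2x4x4 input, checked in both the NCHW and NHWC layouts.
// L2 pooling takes the square root of the mean of the squared values in each pool window.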
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    std::vector<T> inputData(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 7.0f, 5.0f, 5.0f,
            1.0f, 7.0f, 5.0f, 5.0f,
            3.0f, 3.0f, 1.0f, 1.0f,
            3.0f, 3.0f, 1.0f, 1.0f,

            1.0f, 7.0f, 0.0f, 0.0f,
            1.0f, 7.0f, 2.0f, 0.0f,
            0.0f, 2.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 1.0f, 1.0f,
        }));

    std::vector<T> outputData(
        QuantizedVector<T>(qScale, qOffset, {
            5.0f, 5.0f,
            3.0f, 1.0f,

            5.0f, 1.0f,
            1.0f, 1.0f,
        }));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
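
// L2 pooling 3x3 with stride 1 on a 1x1x4x4 input. Every 3x3 window of the input below has a
// sum of squares of 81, so each output value is sqrt(81 / 9) = 3.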
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 2.0f,
            1.0f, 2.0f, 2.0f, 1.0f,
            5.0f, 4.0f, 1.0f, 5.0f,
            2.0f, 1.0f, 5.0f, 2.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 3.0f,
            3.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 3;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 4;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 3.0f,
            3.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
    descriptor.m_StrideX = descriptor.m_StrideY = 7;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
            8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
            0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
    descriptor.m_StrideX = descriptor.m_StrideY = 9;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);

    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = 2;
    descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
696 // Construct input data.
697 auto input = MakeTensor<T, 4>(inputTensorInfo,
698 QuantizedVector<T>(qScale, qOffset, {
702 // These were calculated manually.
703 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
704 QuantizedVector<T>(qScale, qOffset, {
705 0.0f, 3.0f, 0.0f, 3.0f,
708 return SimplePooling2dTestImpl<ArmnnType>(
709 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
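
// Runs the same randomly-generated pooling workload on both the factory under test and a
// reference workload factory, returning the two outputs so callers can compare them.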
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

//
// Tests max pooling with the following parameters:
//
//   Pooling size: 2x2
//   Stride:       (2,2)
//   input size:   4x4
//   channels:     1
//   batch size:   1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 4;

    unsigned int inputHeight = 4;

    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        510.0f, 222.0f, 780.0f, 654.0f,
        141.0f, 276.0f, 15.0f, 546.0f,
        303.0f, 618.0f, 582.0f, 339.0f,
        438.0f, 564.0f, 573.0f, 402.0f
    };

    // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
    std::vector<float> expectedOutputDataWithPadding = {
        0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
        0.0f, 438.0f, 618.0f, 402.0f, 0.0f
    };

    std::vector<float> expectedOutputDataNoPadding = {
        510.0f, 780.0f,
        618.0f, 582.0f
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);

    // Scale and offset should match input - we're just calculating maximum values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

//
// Tests average pooling with the following parameters:
//
//   Pooling size: 3x2
//   Stride:       (2,2)
//   input size:   3x2
//   channels:     1
//   batch size:   1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = 3;
    descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
    descriptor.m_PadRight = descriptor.m_PadLeft;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
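    // With PaddingMethod::IgnoreValue the zero padding elements are counted in the divisor of
    // the average, whereas PaddingMethod::Exclude averages only over the elements that lie
    // inside the input.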
    unsigned int inputWidth = 3;
    unsigned int inputHeight = 2;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        3.0f, 6.0f, 9.0f,
        12.0f, 15.0f, 18.0f,
    };

    std::vector<float> expectedOutputDataWithPadding = {
        6.0f, 8.0f,
    };

    std::vector<float> expectedOutputDataNoPadding = {
        10.5f,
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);

    // Scale and offset should match input - we're just calculating average values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, -2.0f, 3.0f, 4.0f,
            -1.0f, -2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, 3.0f, 4.0f,
            1.0f, 3.0f, 4.0f,
            1.0f, 2.0f, -4.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, -2.0f, 3.0f, 4.0f,
            -1.0f, -2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, 3.0f, 4.0f, 4.0f,
            2.0f, 3.0f, 4.0f, 4.0f,
            2.0f, 3.0f, 4.0f, 4.0f,
            2.0f, 2.0f, 2.0f, -3.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 13.0f, 10.0f,
            6.0f, 26.0f, 20.0f,
            3.0f, 13.0f, 10.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 3.5f,
            2.0f, 3.5f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            9.0f, 27.0f, 18.0f, 36.0f,
            18.0f, 9.0f, 18.0f, 9.0f,
            27.0f, 18.0f, 9.0f, 27.0f,
            9.0f, 27.0f, 9.0f, 18.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            7.0f, 11.0f, 13.0f, 9.0f,
            12.0f, 17.0f, 19.0f, 13.0f,
            12.0f, 16.0f, 16.0f, 10.0f,
            9.0f, 11.0f, 12.0f, 7.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 4.0f, 8.0f, 16.0f,
            4.0f, 2.0f, 2.0f, 4.0f,
            8.0f, 2.0f, 4.0f, 2.0f,
            16.0f, 2.0f, 2.0f, 8.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 4.4721f, 8.0f,
            4.4721f, 2.6457f, 2.236f,
            8.0f, 1.4142f, 4.0f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
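
// L2 pooling 3x3 with stride 1 and 1-element zero padding on a 1x1x4x4 input. For example, the
// top-left output is sqrt((1*1 + 2*2 + 1*1 + 2*2) / 9) = sqrt(10 / 9) ~= 1.0540: the five padded
// positions contribute zero but still count towards the divisor under PaddingMethod::IgnoreValue.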
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
        }));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}