2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
8 #include <armnn/ArmNN.hpp>
10 #include <test/TensorHelpers.hpp>
11 #include "QuantizeHelper.hpp"
13 #include <backends/CpuTensorHandle.hpp>
14 #include <backends/WorkloadFactory.hpp>
15 #include <backends/WorkloadInfo.hpp>
17 #include "Permute.hpp"
18 #include <boost/numeric/conversion/cast.hpp>
// Shared harness used by every Pooling2d test in this file: derives TensorInfos that
// match the supplied input/expected-output shapes (honouring the descriptor's data
// layout), creates a Pooling2d workload on the given factory, executes it, and returns
// the captured result (plus whether the backend supports the configuration at all).
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
21 LayerTestResult<T, 4> SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
22 armnn::Pooling2dDescriptor descriptor,
25 const boost::multi_array<T, 4>& input,
26 const boost::multi_array<T, 4>& outputExpected)
// Resolve which dimension index holds H/W/C for the descriptor's data layout.
28 const armnn::DataLayoutIndexed dataLayout = descriptor.m_DataLayout;
29 auto heightIndex = dataLayout.GetHeightIndex();
30 auto widthIndex = dataLayout.GetWidthIndex();
31 auto channelsIndex = dataLayout.GetChannelsIndex();
// Read the tensor dimensions straight from the boost::multi_array shapes
// (batch is always dimension 0 in both layouts).
33 unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
34 unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
35 unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
36 unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
38 unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
39 unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
40 unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
41 unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
// GetTensorInfo arranges the N/C/H/W dimensions according to dataLayout.
43 armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
44 inputWidth, dataLayout);
45 armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
46 outputWidth, dataLayout);
48 // Set quantization parameters if the requested type is a quantized type.
49 if(armnn::IsQuantizedType<T>())
51 inputTensorInfo.SetQuantizationScale(qScale);
52 inputTensorInfo.SetQuantizationOffset(qOffset);
53 outputTensorInfo.SetQuantizationScale(qScale);
54 outputTensorInfo.SetQuantizationOffset(qOffset);
57 LayerTestResult<T, 4> result(outputTensorInfo);
59 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
60 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
// Build the queue descriptor from the caller's pooling parameters; the data layout
// is written explicitly so the workload sees the same layout used for the infos.
62 armnn::Pooling2dQueueDescriptor queueDescriptor;
63 queueDescriptor.m_Parameters = descriptor;
64 queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
66 armnn::WorkloadInfo workloadInfo;
67 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
68 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
70 // Don't execute if Pooling is not supported, as an exception will be raised.
71 armnn::BackendId backend = workloadFactory.GetBackendId();
72 const size_t reasonIfUnsupportedMaxLen = 255;
73 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
74 result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
75 queueDescriptor.m_Parameters,
76 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
77 if (!result.supported)
// Create, allocate and run the workload, then read the computed output back out.
82 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
84 inputHandle->Allocate();
85 outputHandle->Allocate();
87 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
91 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
// Store the reference values so the caller can compare output vs. outputExpected.
93 result.outputExpected = outputExpected;
99 // Tests max pooling with the following parameters:
// 3x3 pool, stride 2x4, over an 8x13 input (2 channels, 2 batches). When
// forceNoPadding is false, 3 columns of left/right padding are applied.
// NOTE(review): forceNoPadding/qScale/qOffset are used below but declared outside
// this excerpt - presumably parameters of this function; confirm in full file.
108 LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(armnn::IWorkloadFactory& workloadFactory,
113 armnn::Pooling2dDescriptor descriptor;
114 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
115 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
116 descriptor.m_StrideX = 2;
117 descriptor.m_StrideY = 4;
118 // forceNoPadding is mainly used for compatibility with ARM Compute.
119 // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
120 descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
121 descriptor.m_PadTop = descriptor.m_PadBottom = 0;
122 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
123 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
// Output extents follow the standard floor-rounded pooling shape formula.
125 unsigned int inputWidth = 8;
126 unsigned int inputHeight = 13;
127 unsigned int outputWidth =
128 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
129 descriptor.m_StrideX;
130 unsigned int outputHeight =
131 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
132 descriptor.m_StrideY;
133 unsigned int channels = 2;
134 unsigned int batchSize = 2;
136 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
137 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
139 // Set quantization parameters if the requested type is a quantized type.
140 if(armnn::IsQuantizedType<T>())
142 inputTensorInfo.SetQuantizationScale(qScale);
143 inputTensorInfo.SetQuantizationOffset(qOffset);
144 outputTensorInfo.SetQuantizationScale(qScale);
145 outputTensorInfo.SetQuantizationOffset(qOffset);
// One 8x13 channel of hand-written values, replicated/negated below.
148 std::vector<float> singleChannelData({
149 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
150 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
151 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
152 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
153 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
154 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
155 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
156 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
157 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
158 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
159 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
160 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
161 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
164 // Constructs input data.
165 std::vector<float> inputData;
166 auto negator = [](float f) { return -f; };
168 // First image (two channels where the second channel is the negative of the first one).
169 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
170 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
172 // Second image (same as first image).
173 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
174 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
176 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
178 // These were calculated manually.
179 auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
180 boost::multi_array<T, 4> outputExpected(shape);
// Two alternative expected tensors are assigned below - presumably selected by
// the forceNoPadding branch whose braces fall outside this excerpt; verify in full file.
183 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
184 QuantizedVector<T>(qScale, qOffset, {
204 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
205 QuantizedVector<T>(qScale, qOffset, {
206 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
207 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
208 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
210 0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
211 0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
212 0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f,
214 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
215 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
216 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
218 0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
219 0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
220 0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f
224 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 max pooling with stride 2 over a 1x2x4x4 input; supports both NCHW and
// NHWC by permuting the hand-written NCHW data when NHWC is requested.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
228 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
229 const armnn::DataLayoutIndexed& dataLayout = armnn::DataLayout::NCHW,
233 armnn::Pooling2dDescriptor descriptor;
234 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
235 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
236 descriptor.m_StrideX = descriptor.m_StrideY = 2;
237 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
238 descriptor.m_DataLayout = dataLayout;
240 armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
241 armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
243 // Set quantization parameters if the requested type is a quantized type.
244 if(armnn::IsQuantizedType<T>())
246 inputTensorInfo.SetQuantizationScale(qScale);
247 inputTensorInfo.SetQuantizationOffset(qOffset);
248 outputTensorInfo.SetQuantizationScale(qScale);
249 outputTensorInfo.SetQuantizationOffset(qOffset);
// Input data written in NCHW order (two 4x4 channels).
252 std::vector<T> inputData(
253 QuantizedVector<T>(qScale, qOffset, {
254 1.0f, 2.0f, 5.0f, 6.0f,
255 3.0f, 4.0f, 7.0f, 8.0f,
256 9.0f, 10.0f, 13.0f, 14.0f,
257 11.0f, 12.0f, 15.0f, 16.0f,
259 17.0f, 18.0f, 21.0f, 22.0f,
260 19.0f, 20.0f, 23.0f, 24.0f,
261 25.0f, 26.0f, 29.0f, 30.0f,
262 27.0f, 28.0f, 31.0f, 32.0f,
// Expected data elided in this excerpt (original lines 267-272).
265 std::vector<T> outputData(
266 QuantizedVector<T>(qScale, qOffset, {
// If NHWC was requested, permute both buffers from the NCHW order used above.
274 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
275 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
277 std::vector<T> tmp(inputData.size());
278 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
281 std::vector<T> tmp1(outputData.size());
282 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
286 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
288 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
290 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 average pooling with stride 2 over a 1x2x4x4 input; mirrors
// SimpleMaxPooling2dTestCommon including the NCHW->NHWC permutation path.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
294 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
295 armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
299 armnn::Pooling2dDescriptor descriptor;
300 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
301 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
302 descriptor.m_StrideX = descriptor.m_StrideY = 2;
303 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
304 descriptor.m_DataLayout = dataLayout;
306 armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
307 armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
309 // Set quantization parameters if the requested type is a quantized type.
310 if(armnn::IsQuantizedType<T>())
312 inputTensorInfo.SetQuantizationScale(qScale);
313 inputTensorInfo.SetQuantizationOffset(qOffset);
314 outputTensorInfo.SetQuantizationScale(qScale);
315 outputTensorInfo.SetQuantizationOffset(qOffset);
// Input data written in NCHW order (two 4x4 channels).
318 std::vector<T> inputData(
319 QuantizedVector<T>(qScale, qOffset, {
320 2.0f, 2.0f, 6.0f, 6.0f,
321 4.0f, 4.0f, 8.0f, 8.0f,
322 10.0f, 12.0f, 14.0f, 16.0f,
323 10.0f, 12.0f, 16.0f, 14.0f,
325 18.0f, 20.0f, 24.0f, 22.0f,
326 20.0f, 18.0f, 22.0f, 24.0f,
327 26.0f, 28.0f, 0.0f, 0.0f,
328 26.0f, 28.0f, 0.0f, 0.0f,
// Expected data elided in this excerpt (original lines 333-338).
331 std::vector<T> outputData(
332 QuantizedVector<T>(qScale, qOffset, {
// If NHWC was requested, permute both buffers from the NCHW order used above.
340 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
341 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
343 std::vector<T> tmp(inputData.size());
344 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
347 std::vector<T> tmp1(outputData.size());
348 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
352 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
354 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
356 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// Stresses average pooling with an oversized 100x100 kernel, 50-pixel padding on
// every edge and stride 5 over a 5x3x52x60 input. With an all-ones input and
// PaddingMethod::Exclude, every pooled average is expected to be exactly 1.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
360 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
364 armnn::Pooling2dDescriptor descriptor;
365 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
366 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
367 descriptor.m_StrideX = descriptor.m_StrideY = 5;
368 descriptor.m_PadLeft = 50;
369 descriptor.m_PadRight = 50;
370 descriptor.m_PadTop = 50;
371 descriptor.m_PadBottom = 50;
372 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
374 armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType<T>());
375 armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType<T>());
377 // Set quantization parameters if the requested type is a quantized type.
378 if(armnn::IsQuantizedType<T>())
380 inputTensorInfo.SetQuantizationScale(qScale);
381 inputTensorInfo.SetQuantizationOffset(qOffset);
382 outputTensorInfo.SetQuantizationScale(qScale);
383 outputTensorInfo.SetQuantizationOffset(qOffset);
// Fill the whole input with the value 1 so the expected averages are trivially 1.
386 std::vector<T> inputVec;
388 for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
390 inputVec.push_back(1);
393 auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
395 std::vector<T> outputVec;
397 for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
399 outputVec.push_back(1);
402 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
404 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 L2 pooling with stride 2 over a 1x2x4x4 input; supports NCHW and NHWC via
// the same permutation scheme as the max/average variants above.
// NOTE(review): unlike the other tests in this file, no quantization scale/offset
// is set on the tensor infos here even though QuantizedVector uses qScale/qOffset -
// confirm whether that is intentional in the full file.
408 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
409 armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
413 armnn::Pooling2dDescriptor descriptor;
414 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
415 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
416 descriptor.m_StrideX = descriptor.m_StrideY = 2;
417 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
418 descriptor.m_DataLayout = dataLayout;
420 armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
421 armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
// Input data written in NCHW order (two 4x4 channels).
423 std::vector<T> inputData(
424 QuantizedVector<T>(qScale, qOffset, {
425 1.0f, 7.0f, 5.0f, 5.0f,
426 1.0f, 7.0f, 5.0f, 5.0f,
427 3.0f, 3.0f, 1.0f, 1.0f,
428 3.0f, 3.0f, 1.0f, 1.0f,
430 1.0f, 7.0f, 0.0f, 0.0f,
431 1.0f, 7.0f, 2.0f, 0.0f,
432 0.0f, 2.0f, 1.0f, 1.0f,
433 0.0f, 0.0f, 1.0f, 1.0f,
// Expected data elided in this excerpt (original lines 438-443).
436 std::vector<T> outputData(
437 QuantizedVector<T>(qScale, qOffset, {
// If NHWC was requested, permute both buffers from the NCHW order used above.
445 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
446 if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
448 std::vector<T> tmp(inputData.size());
449 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
452 std::vector<T> tmp1(outputData.size());
453 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
457 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
459 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
461 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 L2 pooling with stride 1 over a 1x1x4x4 input -> 1x1x2x2 output.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
465 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(armnn::IWorkloadFactory& workloadFactory,
469 armnn::Pooling2dDescriptor descriptor;
470 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
471 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
472 descriptor.m_StrideX = descriptor.m_StrideY = 1;
473 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
475 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
476 auto input = MakeTensor<T, 4>(inputTensorInfo,
477 QuantizedVector<T>(qScale, qOffset, {
478 2.0f, 1.0f, 5.0f, 2.0f,
479 1.0f, 2.0f, 2.0f, 1.0f,
480 5.0f, 4.0f, 1.0f, 5.0f,
481 2.0f, 1.0f, 5.0f, 2.0f,
// Expected values elided in this excerpt (original lines 487-489).
484 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
485 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
486 QuantizedVector<T>(qScale, qOffset, {
491 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 L2 pooling with stride 3 over a 1x1x9x9 input -> 1x1x3x3 output. The input
// is a 3x3 tile pattern so every pooling window sees the same values.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
495 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(armnn::IWorkloadFactory& workloadFactory,
499 armnn::Pooling2dDescriptor descriptor;
500 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
501 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
502 descriptor.m_StrideX = descriptor.m_StrideY = 3;
503 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
505 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
506 auto input = MakeTensor<T, 4>(inputTensorInfo,
507 QuantizedVector<T>(qScale, qOffset, {
508 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
509 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
510 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
511 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
512 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
513 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
514 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
515 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
516 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
// Expected values elided in this excerpt (original lines 522-525).
519 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
520 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
521 QuantizedVector<T>(qScale, qOffset, {
527 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// 3x3 L2 pooling with stride 4 over a 1x1x7x7 input -> 1x1x2x2 output. Zero rows
// and columns separate the four 3x3 patches so each window is isolated.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
531 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(armnn::IWorkloadFactory& workloadFactory,
535 armnn::Pooling2dDescriptor descriptor;
536 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
537 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
538 descriptor.m_StrideX = descriptor.m_StrideY = 4;
539 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
541 armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
542 auto input = MakeTensor<T, 4>(inputTensorInfo,
543 QuantizedVector<T>(qScale, qOffset, {
544 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
545 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
546 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
547 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
548 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
549 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
550 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
// Expected values elided in this excerpt (original lines 556-558).
553 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
554 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
555 QuantizedVector<T>(qScale, qOffset, {
560 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// 7x7 L2 pooling with stride 7: the single window covers the whole 1x1x7x7 input,
// producing a 1x1x1x1 output.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
564 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(armnn::IWorkloadFactory& workloadFactory,
568 armnn::Pooling2dDescriptor descriptor;
569 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
570 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
571 descriptor.m_StrideX = descriptor.m_StrideY = 7;
572 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
574 armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
575 auto input = MakeTensor<T, 4>(inputTensorInfo,
576 QuantizedVector<T>(qScale, qOffset, {
577 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
578 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
579 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
580 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
581 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
582 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
583 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
// Expected value elided in this excerpt (original line 589).
586 armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
587 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
588 QuantizedVector<T>(qScale, qOffset, {
592 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// 9x9 L2 pooling with stride 9: the single window covers the whole 1x1x9x9 input
// (same tiled pattern as the Size3Stride3 test), producing a 1x1x1x1 output.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
596 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(armnn::IWorkloadFactory& workloadFactory,
600 armnn::Pooling2dDescriptor descriptor;
601 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
602 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
603 descriptor.m_StrideX = descriptor.m_StrideY = 9;
604 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
606 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
607 auto input = MakeTensor<T, 4>(inputTensorInfo,
608 QuantizedVector<T>(qScale, qOffset, {
609 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
610 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
611 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
612 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
613 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
614 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
615 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
616 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
617 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
// Expected value elided in this excerpt (original line 623).
620 armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
621 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
622 QuantizedVector<T>(qScale, qOffset, {
626 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// Exercises a non-square (2x3) max pool with asymmetric padding (left 2, right 0,
// top 1, bottom 2) over a minimal 1x1x1x3 input -> 1x1x2x2 output.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
630 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
634 armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType<T>());
635 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
637 armnn::Pooling2dDescriptor descriptor;
638 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
639 descriptor.m_PoolWidth = 2;
640 descriptor.m_PoolHeight = 3;
641 descriptor.m_StrideX = 2;
642 descriptor.m_StrideY = 1;
643 descriptor.m_PadLeft = 2;
644 descriptor.m_PadRight = 0;
645 descriptor.m_PadTop = 1;
646 descriptor.m_PadBottom = 2;
647 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
648 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
650 // Construct input data.
// Input values elided in this excerpt (original lines 653-654).
651 auto input = MakeTensor<T, 4>(inputTensorInfo,
652 QuantizedVector<T>(qScale, qOffset, {
656 // These were calculated manually.
657 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
658 QuantizedVector<T>(qScale, qOffset, {
659 0.0f, 3.0f, 0.0f, 3.0f,
662 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// Runs the same randomly-initialised pooling workload on two factories (the backend
// under test and a reference backend) and returns both outputs for comparison:
// result.output comes from workloadFactory, result.outputExpected from
// refWorkloadFactory. The pooling algorithm is selected by the caller.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
666 LayerTestResult<T, 4> ComparePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
667 armnn::IWorkloadFactory& refWorkloadFactory,
668 armnn::PoolingAlgorithm poolingType,
// Fixed NCHW geometry: 5x2x32x16 input, 3x3 pool, stride 2x4, no padding.
672 const unsigned int inputWidth = 16;
673 const unsigned int inputHeight = 32;
674 const unsigned int channelCount = 2;
675 const unsigned int batchSize = 5;
677 const unsigned int poolSize = 3;
678 const unsigned int strideX = 2;
679 const unsigned int strideY = 4;
680 const unsigned int padX = 0;
681 const unsigned int padY = 0;
// Floor-rounded pooling output shape formula, as in the tests above.
683 const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
684 const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
686 armnn::TensorInfo inputTensorInfo;
687 armnn::TensorInfo outputTensorInfo;
689 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
690 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
692 inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
693 outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
695 // Set quantization parameters if the requested type is a quantized type.
696 if(armnn::IsQuantizedType<T>())
698 inputTensorInfo.SetQuantizationScale(qScale);
699 inputTensorInfo.SetQuantizationOffset(qOffset);
700 outputTensorInfo.SetQuantizationScale(qScale);
701 outputTensorInfo.SetQuantizationOffset(qOffset);
// Deterministic pseudo-random input (fixed seed) so runs are reproducible.
704 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
706 LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
708 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
709 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
711 armnn::Pooling2dQueueDescriptor data;
712 armnn::WorkloadInfo info;
713 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
714 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
715 data.m_Parameters.m_PoolType = poolingType;
716 data.m_Parameters.m_PoolWidth = poolSize;
717 data.m_Parameters.m_PoolHeight = poolSize;
718 data.m_Parameters.m_StrideX = strideX;
719 data.m_Parameters.m_StrideY = strideY;
720 data.m_Parameters.m_PadLeft = padX;
721 data.m_Parameters.m_PadRight = padX;
722 data.m_Parameters.m_PadTop = padY;
723 data.m_Parameters.m_PadBottom = padY;
724 data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
726 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
727 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
729 // Don't execute if Pooling is not supported, as an exception will be raised.
730 armnn::BackendId backend = workloadFactory.GetBackendId();
731 const size_t reasonIfUnsupportedMaxLen = 255;
732 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
733 comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
735 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
736 if (!comparisonResult.supported)
738 return comparisonResult;
// Reference run reuses the same queue descriptor, rebound to the ref handles.
741 armnn::Pooling2dQueueDescriptor refData = data;
742 armnn::WorkloadInfo refInfo = info;
743 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
744 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
746 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
747 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
749 outputHandleRef->Allocate();
750 inputHandleRef->Allocate();
751 inputHandle->Allocate();
752 outputHandle->Allocate();
// Both backends receive identical input; each output goes into its own slot.
754 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
755 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
758 workloadRef->Execute();
760 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
761 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
763 return comparisonResult;
767 // Tests max pooling with the following parameters:
// 2x2 pool, stride 2x2, over a single-channel 4x4 input; optionally pads 3 columns
// on each side (forceNoPadding selects zero padding and a narrower expected output).
// NOTE(review): forceNoPadding/qScale/qOffset are used below but declared outside
// this excerpt - presumably parameters of this function; confirm in full file.
776 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(armnn::IWorkloadFactory& workloadFactory,
781 armnn::Pooling2dDescriptor descriptor;
782 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
783 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
784 descriptor.m_StrideX = 2;
785 descriptor.m_StrideY = 2;
786 descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
787 descriptor.m_PadTop = descriptor.m_PadBottom = 0;
788 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
789 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
// Output extents follow the standard floor-rounded pooling shape formula.
791 unsigned int inputWidth = 4;
792 unsigned int inputHeight = 4;
793 unsigned int outputWidth =
794 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
795 descriptor.m_StrideX;
796 unsigned int outputHeight =
797 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
798 descriptor.m_StrideY;
799 unsigned int channels = 1;
800 unsigned int batchSize = 1;
802 std::vector<float> inputData = {
803 510.0f, 222.0f, 780.0f, 654.0f,
804 141.0f, 276.0f, 15.0f, 546.0f,
805 303.0f, 618.0f, 582.0f, 339.0f,
806 438.0f, 564.0f, 573.0f, 402.0f
809 // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
810 std::vector<float> expectedOutputDataWithPadding = {
811 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
812 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
// No-padding expected values elided in this excerpt (original lines 816-817).
815 std::vector<float> expectedOutputDataNoPadding = {
820 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
822 // Scale and offset should match input - we're just calculating maximum values.
823 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
825 // Set quantization parameters if the requested type is a quantized type.
826 if(armnn::IsQuantizedType<T>())
828 inputTensorInfo.SetQuantizationScale(qScale);
829 inputTensorInfo.SetQuantizationOffset(qOffset);
830 outputTensorInfo.SetQuantizationScale(qScale);
831 outputTensorInfo.SetQuantizationOffset(qOffset);
834 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
// Select the expected tensor matching the padding configuration chosen above.
836 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
837 forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
838 QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
840 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
844 // Tests max pooling with the following parameters:
// (NOTE(review): despite the comment above, this test actually exercises AVERAGE
// pooling - see m_PoolType below.) 3x2 pool, stride 2x2, over a 2x3 input with
// optional 1-wide horizontal padding and PaddingMethod::IgnoreValue, meaning padded
// zeros count towards the average denominator.
853 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
854 armnn::IWorkloadFactory& workloadFactory,
859 armnn::Pooling2dDescriptor descriptor;
860 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
861 descriptor.m_PoolWidth = 3;
862 descriptor.m_PoolHeight = 2;
863 descriptor.m_StrideX = 2;
864 descriptor.m_StrideY = 2;
865 descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
866 descriptor.m_PadRight = descriptor.m_PadLeft;
867 descriptor.m_PadTop = 0;
868 descriptor.m_PadBottom = 0;
869 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
870 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
// Output extents follow the standard floor-rounded pooling shape formula.
872 unsigned int inputWidth = 3;
873 unsigned int inputHeight = 2;
874 unsigned int outputWidth =
875 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
876 descriptor.m_StrideX;
877 unsigned int outputHeight =
878 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
879 descriptor.m_StrideY;
880 unsigned int channels = 1;
881 unsigned int batchSize = 1;
// Input and both expected buffers' values elided in this excerpt
// (original lines 884-886, 889-890, 893-894).
883 std::vector<float> inputData = {
888 std::vector<float> expectedOutputDataWithPadding = {
892 std::vector<float> expectedOutputDataNoPadding = {
896 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
898 // Scale and offset should match input - we're just calculating average values.
899 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
901 // Set quantization parameters if the requested type is a quantized type.
902 if(armnn::IsQuantizedType<T>())
904 inputTensorInfo.SetQuantizationScale(qScale);
905 inputTensorInfo.SetQuantizationOffset(qOffset);
906 outputTensorInfo.SetQuantizationScale(qScale);
907 outputTensorInfo.SetQuantizationOffset(qOffset);
910 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
// Select the expected tensor matching the padding configuration chosen above.
912 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
913 forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
914 QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
916 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
// 2x2 max pooling with stride 2 and symmetric 1-pixel padding on all sides using
// PaddingMethod::IgnoreValue, over a 1x1x4x4 input -> 1x1x3x3 output. The input
// mixes positive and negative values so padded zeros can win some windows.
// NOTE(review): qScale/qOffset are used below but declared outside this excerpt -
// presumably trailing quantization parameters of this function; confirm in full file.
921 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
925 armnn::Pooling2dDescriptor descriptor;
926 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
927 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
928 descriptor.m_StrideX = descriptor.m_StrideY = 2;
929 descriptor.m_PadLeft = 1;
930 descriptor.m_PadRight = 1;
931 descriptor.m_PadTop = 1;
932 descriptor.m_PadBottom = 1;
933 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
935 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
936 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
938 // Set quantization parameters if the requested type is a quantized type.
939 if(armnn::IsQuantizedType<T>())
941 inputTensorInfo.SetQuantizationScale(qScale);
942 inputTensorInfo.SetQuantizationOffset(qOffset);
943 outputTensorInfo.SetQuantizationScale(qScale);
944 outputTensorInfo.SetQuantizationOffset(qOffset);
947 auto input = MakeTensor<T, 4>(inputTensorInfo,
948 QuantizedVector<T>(qScale, qOffset, {
949 -1.0f, -2.0f, 3.0f, 4.0f,
950 -1.0f, -2.0f, 3.0f, 4.0f,
951 1.0f, 2.0f, -3.0f, -4.0f,
952 1.0f, 2.0f, -3.0f, -4.0f,
// Expected values elided in this excerpt (original lines 957-959).
955 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
956 QuantizedVector<T>(qScale, qOffset, {
962 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
966 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
970 armnn::Pooling2dDescriptor descriptor;
971 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
972 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
973 descriptor.m_StrideX = descriptor.m_StrideY = 1;
974 descriptor.m_PadLeft = 1;
975 descriptor.m_PadRight = 1;
976 descriptor.m_PadTop = 1;
977 descriptor.m_PadBottom = 1;
978 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
980 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
981 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
983 // Set quantization parameters if the requested type is a quantized type.
984 if(armnn::IsQuantizedType<T>())
986 inputTensorInfo.SetQuantizationScale(qScale);
987 inputTensorInfo.SetQuantizationOffset(qOffset);
988 outputTensorInfo.SetQuantizationScale(qScale);
989 outputTensorInfo.SetQuantizationOffset(qOffset);
992 auto input = MakeTensor<T, 4>(inputTensorInfo,
993 QuantizedVector<T>(qScale, qOffset, {
994 -1.0f, -2.0f, 3.0f, 4.0f,
995 -1.0f, -2.0f, 3.0f, 4.0f,
996 1.0f, 2.0f, -3.0f, -4.0f,
997 1.0f, 2.0f, -3.0f, -4.0f,
1000 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1001 QuantizedVector<T>(qScale, qOffset, {
1002 -1.0f, 3.0f, 4.0f, 4.0f,
1003 2.0f, 3.0f, 4.0f, 4.0f,
1004 2.0f, 3.0f, 4.0f, 4.0f,
1005 2.0f, 2.0f, 2.0f, -3.0f,
1008 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1011 template<typename T>
1012 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
1013 float qScale = 1.0f,
1014 int32_t qOffset = 0)
1016 armnn::Pooling2dDescriptor descriptor;
1017 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1018 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1019 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1020 descriptor.m_PadLeft = 1;
1021 descriptor.m_PadRight = 1;
1022 descriptor.m_PadTop = 1;
1023 descriptor.m_PadBottom = 1;
1024 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1026 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1027 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
1029 // Set quantization parameters if the requested type is a quantized type.
1030 if(armnn::IsQuantizedType<T>())
1032 inputTensorInfo.SetQuantizationScale(qScale);
1033 inputTensorInfo.SetQuantizationOffset(qOffset);
1034 outputTensorInfo.SetQuantizationScale(qScale);
1035 outputTensorInfo.SetQuantizationOffset(qOffset);
1038 auto input = MakeTensor<T, 4>(inputTensorInfo,
1039 QuantizedVector<T>(qScale, qOffset, {
1040 12.0f, 20.0f, 32.0f, 40.0f,
1041 12.0f, 20.0f, 32.0f, 40.0f,
1042 12.0f, 20.0f, 32.0f, 40.0f,
1043 12.0f, 20.0f, 32.0f, 40.0f,
1046 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1047 QuantizedVector<T>(qScale, qOffset, {
1053 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1056 template<typename T>
1057 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
1058 float qScale = 1.0f,
1059 int32_t qOffset = 0)
1061 armnn::Pooling2dDescriptor descriptor;
1062 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1063 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1064 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1065 descriptor.m_PadLeft = 0;
1066 descriptor.m_PadRight = 0;
1067 descriptor.m_PadTop = 0;
1068 descriptor.m_PadBottom = 0;
1069 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1070 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
1072 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, armnn::GetDataType<T>());
1073 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
1075 // Set quantization parameters if the requested type is a quantized type.
1076 if(armnn::IsQuantizedType<T>())
1078 inputTensorInfo.SetQuantizationScale(qScale);
1079 inputTensorInfo.SetQuantizationOffset(qOffset);
1080 outputTensorInfo.SetQuantizationScale(qScale);
1081 outputTensorInfo.SetQuantizationOffset(qOffset);
1084 auto input = MakeTensor<T, 4>(inputTensorInfo,
1085 QuantizedVector<T>(qScale, qOffset, {
1086 1.0f, 2.0f, 3.0f, 4.0f,
1087 1.0f, 2.0f, 3.0f, 4.0f,
1088 1.0f, 2.0f, 3.0f, 4.0f,
1089 1.0f, 2.0f, 3.0f, 4.0f,
1092 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1093 QuantizedVector<T>(qScale, qOffset, {
1098 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1101 template<typename T>
1102 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
1103 float qScale = 1.0f,
1104 int32_t qOffset = 0)
1106 armnn::Pooling2dDescriptor descriptor;
1107 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1108 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1109 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1110 descriptor.m_PadLeft = 1;
1111 descriptor.m_PadRight = 1;
1112 descriptor.m_PadTop = 1;
1113 descriptor.m_PadBottom = 1;
1114 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1116 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1117 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1119 // Set quantization parameters if the requested type is a quantized type.
1120 if(armnn::IsQuantizedType<T>())
1122 inputTensorInfo.SetQuantizationScale(qScale);
1123 inputTensorInfo.SetQuantizationOffset(qOffset);
1124 outputTensorInfo.SetQuantizationScale(qScale);
1125 outputTensorInfo.SetQuantizationOffset(qOffset);
1128 auto input = MakeTensor<T, 4>(inputTensorInfo,
1129 QuantizedVector<T>(qScale, qOffset, {
1130 9.0f, 27.0f, 18.0f, 36.0f,
1131 18.0f, 9.0f, 18.0f, 9.0f,
1132 27.0f, 18.0f, 9.0f, 27.0f,
1133 9.0f, 27.0f, 9.0f, 18.0f,
1136 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1137 QuantizedVector<T>(qScale, qOffset, {
1138 7.0f, 11.0f, 13.0f, 9.0f,
1139 12.0f, 17.0f, 19.0f, 13.0f,
1140 12.0f, 16.0f, 16.0f, 10.0f,
1141 9.0f, 11.0f, 12.0f, 7.0f,
1144 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1147 template<typename T>
1148 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
1149 float qScale = 1.0f,
1150 int32_t qOffset = 0)
1152 armnn::Pooling2dDescriptor descriptor;
1153 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1154 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1155 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1156 descriptor.m_PadLeft = 1;
1157 descriptor.m_PadRight = 1;
1158 descriptor.m_PadTop = 1;
1159 descriptor.m_PadBottom = 1;
1160 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1162 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1163 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
1165 // Set quantization parameters if the requested type is a quantized type.
1166 if(armnn::IsQuantizedType<T>())
1168 inputTensorInfo.SetQuantizationScale(qScale);
1169 inputTensorInfo.SetQuantizationOffset(qOffset);
1170 outputTensorInfo.SetQuantizationScale(qScale);
1171 outputTensorInfo.SetQuantizationOffset(qOffset);
1174 auto input = MakeTensor<T, 4>(inputTensorInfo,
1175 QuantizedVector<T>(qScale, qOffset, {
1176 2.0f, 4.0f, 8.0f, 16.0f,
1177 4.0f, 2.0f, 2.0f, 4.0f,
1178 8.0f, 2.0f, 4.0f, 2.0f,
1179 16.0f, 2.0f, 2.0f, 8.0f,
1182 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1183 QuantizedVector<T>(qScale, qOffset, {
1184 1.0f, 4.4721f, 8.0f,
1185 4.4721f, 2.6457f, 2.236f,
1186 8.0f, 1.4142f, 4.0f,
1189 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1192 template<typename T>
1193 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
1194 float qScale = 1.0f,
1195 int32_t qOffset = 0)
1197 armnn::Pooling2dDescriptor descriptor;
1198 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1199 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1200 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1201 descriptor.m_PadLeft = 1;
1202 descriptor.m_PadRight = 1;
1203 descriptor.m_PadTop = 1;
1204 descriptor.m_PadBottom = 1;
1205 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1207 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1208 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1210 // Set quantization parameters if the requested type is a quantized type.
1211 if(armnn::IsQuantizedType<T>())
1213 inputTensorInfo.SetQuantizationScale(qScale);
1214 inputTensorInfo.SetQuantizationOffset(qOffset);
1215 outputTensorInfo.SetQuantizationScale(qScale);
1216 outputTensorInfo.SetQuantizationOffset(qOffset);
1219 auto input = MakeTensor<T, 4>(inputTensorInfo,
1220 QuantizedVector<T>(qScale, qOffset, {
1221 1.0f, 2.0f, 3.0f, 4.0f,
1222 1.0f, 2.0f, 3.0f, 4.0f,
1223 1.0f, 2.0f, 3.0f, 4.0f,
1224 1.0f, 2.0f, 3.0f, 4.0f,
1227 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1228 QuantizedVector<T>(qScale, qOffset, {
1229 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1230 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1231 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1232 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1235 return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);