//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "QuantizeHelper.hpp"

#include <armnn/ArmNN.hpp>

#include <Permute.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/WorkloadInfo.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/numeric/conversion/cast.hpp>
template<typename T>
LayerTestResult<T, 4> SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::Pooling2dDescriptor descriptor,
                                              float qScale,
                                              int32_t qOffset,
                                              const boost::multi_array<T, 4>& input,
                                              const boost::multi_array<T, 4>& outputExpected)
{
    const armnn::DataLayoutIndexed dataLayout = descriptor.m_DataLayout;
    auto heightIndex = dataLayout.GetHeightIndex();
    auto widthIndex = dataLayout.GetWidthIndex();
    auto channelsIndex = dataLayout.GetChannelsIndex();
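    // Aside (explanatory note, not in the original source): DataLayoutIndexed
    // maps the logical dimensions onto positions in the 4D shape. For NCHW
    // this yields channels = 1, height = 2, width = 3; for NHWC it yields
    // height = 1, width = 2, channels = 3. Batch is always dimension 0,
    // which is why the batch sizes below are read from shape()[0] directly.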
    unsigned int inputHeight     = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
    unsigned int inputWidth      = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
    unsigned int inputChannels   = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
    unsigned int inputBatchSize  = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight    = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
    unsigned int outputWidth     = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
    unsigned int outputChannels  = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
                                                          inputWidth, dataLayout);
    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
                                                          outputWidth, dataLayout);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}
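
// Usage sketch (illustrative only; the backend factory and test macro below
// are assumptions, not part of this file). Each *TestCommon helper below
// builds its input/expected tensors and funnels them through
// SimplePooling2dTestImpl, so a per-backend unit test reduces to:
//
//     armnn::RefWorkloadFactory workloadFactory;
//     LayerTestResult<float, 4> result = SimpleMaxPooling2dTestCommon<float>(workloadFactory);
//     BOOST_TEST(CompareTensors(result.output, result.outputExpected));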
// Tests max pooling with the following parameters:
//
//   Pooling size: 3x3
//   Stride:       (2,4)
//   input size:   8x13
//   channels:     2
//   batch size:   2
template<typename T>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                   bool forceNoPadding,
                                                                   float qScale = 1.0f,
                                                                   int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    // forceNoPadding is mainly used for compatibility with ARM Compute.
    // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 8;
    unsigned int inputHeight = 13;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
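    // Worked through (explanatory note, not in the original source): with
    // padding enabled the formula gives (8 + 3 + 3 + 2 - 3) / 2 = 6 wide and
    // (13 + 0 + 0 + 4 - 3) / 4 = 3 high; with forceNoPadding it gives
    // (8 + 2 - 3) / 2 = 3 wide and (13 + 4 - 3) / 4 = 3 high. The two
    // expected-output branches below follow those shapes.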
    unsigned int channels = 2;
    unsigned int batchSize = 2;

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<float> singleChannelData({
        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
    });

    // Constructs input data.
    std::vector<float> inputData;
    auto negator = [](float f) { return -f; };

    // First image (two channels where the second channel is the negative of the first one).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    // Second image (same as first image).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    // These were calculated manually.
    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
    boost::multi_array<T, 4> outputExpected(shape);
    if (forceNoPadding)
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>(qScale, qOffset, {
                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f,

                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f
            }));
    }
    else
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>(qScale, qOffset, {
                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f,

                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f
            }));
    }

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::DataLayoutIndexed& dataLayout = armnn::DataLayout::NCHW,
                                                   float qScale = 1.0f,
                                                   int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>(qScale, qOffset, {
             1.0f,  2.0f,  5.0f,  6.0f,
             3.0f,  4.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 13.0f, 14.0f,
            11.0f, 12.0f, 15.0f, 16.0f,

            17.0f, 18.0f, 21.0f, 22.0f,
            19.0f, 20.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 29.0f, 30.0f,
            27.0f, 28.0f, 31.0f, 32.0f,
        }));

    std::vector<T> outputData(
        QuantizedVector<T>(qScale, qOffset, {
             4.0f,  8.0f,
            12.0f, 16.0f,

            20.0f, 24.0f,
            28.0f, 32.0f,
        }));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
        outputData = tmp1;
    }
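    // Aside (explanatory note, not in the original source): an
    // armnn::PermutationVector gives, for each source dimension i, the
    // destination position mappings[i]. Applied to NCHW data, { 0, 3, 1, 2 }
    // keeps N at 0 and sends C to 3, H to 1 and W to 2, i.e. it rearranges
    // the buffers into NHWC order to match the NHWC tensor shapes produced
    // by GetTensorInfo above.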
    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                       armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
                                                       float qScale = 1.0f,
                                                       int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>(qScale, qOffset, {
             2.0f,  2.0f,  6.0f,  6.0f,
             4.0f,  4.0f,  8.0f,  8.0f,
            10.0f, 12.0f, 14.0f, 16.0f,
            10.0f, 12.0f, 16.0f, 14.0f,

            18.0f, 20.0f, 24.0f, 22.0f,
            20.0f, 18.0f, 22.0f, 24.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
        }));

    std::vector<T> outputData(
        QuantizedVector<T>(qScale, qOffset, {
             3.0f,  7.0f,
            11.0f, 15.0f,

            19.0f, 23.0f,
            27.0f,  0.0f,
        }));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                             float qScale = 1.0f,
                                                             int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
    descriptor.m_StrideX = descriptor.m_StrideY = 5;
    descriptor.m_PadLeft = 50;
    descriptor.m_PadRight = 50;
    descriptor.m_PadTop = 50;
    descriptor.m_PadBottom = 50;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputVec;

    for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        inputVec.push_back(1);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);

    std::vector<T> outputVec;

    for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        outputVec.push_back(1);
    }

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
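    // Why all ones (explanatory note, not in the original source): the input
    // is a constant tensor of 1s and m_PaddingMethod is Exclude, so padding
    // elements never enter the average. Every window therefore averages some
    // number of 1s and yields exactly 1, which is why the expected output
    // above is simply another tensor of 1s, with no manual calculation
    // needed for this large tensor.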
    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                  armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
                                                  float qScale = 1.0f,
                                                  int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);

    std::vector<T> inputData(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 7.0f, 5.0f, 5.0f,
            1.0f, 7.0f, 5.0f, 5.0f,
            3.0f, 3.0f, 1.0f, 1.0f,
            3.0f, 3.0f, 1.0f, 1.0f,

            1.0f, 7.0f, 0.0f, 0.0f,
            1.0f, 7.0f, 2.0f, 0.0f,
            0.0f, 2.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 1.0f, 1.0f,
        }));

    std::vector<T> outputData(
        QuantizedVector<T>(qScale, qOffset, {
            5.0f, 5.0f,
            3.0f, 1.0f,

            5.0f, 1.0f,
            1.0f, 1.0f,
        }));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale = 1.0f,
                                                        int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 2.0f,
            1.0f, 2.0f, 2.0f, 1.0f,
            5.0f, 4.0f, 1.0f, 5.0f,
            2.0f, 1.0f, 5.0f, 2.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 3.0f,
            3.0f, 3.0f,
        }));
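    // Worked through (explanatory note, not in the original source): every
    // 3x3 window over this input contains the same nine values in some
    // arrangement, and their squares sum to 81, so each output element is
    // sqrt(81 / 9) = 3. The same repeating pattern drives the other
    // L2Pooling2dSize3* tests below.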
    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale = 1.0f,
                                                        int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 3;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                        float qScale = 1.0f,
                                                        int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 4;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 3.0f,
            3.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                 float qScale = 1.0f,
                                                 int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
    descriptor.m_StrideX = descriptor.m_StrideY = 7;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
            8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
            0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                 float qScale = 1.0f,
                                                 int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
    descriptor.m_StrideX = descriptor.m_StrideY = 9;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        }));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                             float qScale = 1.0f,
                                                             int32_t qOffset = 0)
{
    armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());

    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = 2;
    descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    // Construct input data.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 3.0f, 4.0f,
        }));

    // These were calculated manually.
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            0.0f, 3.0f,
            0.0f, 3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> ComparePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                 armnn::IWorkloadFactory& refWorkloadFactory,
                                                 armnn::PoolingAlgorithm poolingType,
                                                 float qScale = 1.0f,
                                                 int32_t qOffset = 0)
{
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
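    // Worked through (explanatory note, not in the original source):
    // outputWidth  = (16 + 0 + 2 - 3) / 2 = 7 and
    // outputHeight = (32 + 0 + 4 - 3) / 4 = 8, so the comparison below runs
    // on a 5x2x8x7 output tensor.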
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
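
// Usage sketch (illustrative only; the factory types below are assumptions,
// not part of this file): the comparison helper runs the same random input
// through a backend under test and a reference backend, then stores both
// outputs for comparison, e.g.:
//
//     armnn::ClWorkloadFactory clFactory;
//     armnn::RefWorkloadFactory refFactory;
//     auto result = ComparePooling2dTestCommon<float>(clFactory, refFactory,
//                                                     armnn::PoolingAlgorithm::Max);
//     BOOST_TEST(CompareTensors(result.output, result.outputExpected));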
// Tests max pooling with the following parameters:
//
//   Pooling size: 2x2
//   Stride:       (2,2)
//   input size:   4x4
//   channels:     1
//   batch size:   1
template<typename T>
LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                   bool forceNoPadding,
                                                                   float qScale = 1.0f,
                                                                   int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 4;
    unsigned int inputHeight = 4;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        510.0f, 222.0f, 780.0f, 654.0f,
        141.0f, 276.0f,  15.0f, 546.0f,
        303.0f, 618.0f, 582.0f, 339.0f,
        438.0f, 564.0f, 573.0f, 402.0f
    };

    // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
    std::vector<float> expectedOutputDataWithPadding = {
        0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
        0.0f, 438.0f, 618.0f, 402.0f, 0.0f
    };

    std::vector<float> expectedOutputDataNoPadding = {
        510.0f, 780.0f,
        618.0f, 582.0f
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Scale and offset should match input - we're just calculating maximum values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
// Tests average pooling with the following parameters:
//
//   Pooling size: 3x2
//   Stride:       (2,2)
//   input size:   3x2
//   channels:     1
//   batch size:   1
template<typename T>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        bool forceNoPadding,
        float qScale = 1.0f,
        int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = 3;
    descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
    descriptor.m_PadRight = descriptor.m_PadLeft;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
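    // Aside (explanatory note, not in the original source): this is the first
    // test in the file to use PaddingMethod::IgnoreValue. With IgnoreValue
    // the padding elements take part in the pooling as zeros (for an average,
    // the divisor stays at the full pool size), whereas the Exclude method
    // used above leaves padding out of the computation entirely.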
    unsigned int inputWidth = 3;
    unsigned int inputHeight = 2;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
    unsigned int channels = 1;
    unsigned int batchSize = 1;
    std::vector<float> inputData = {
         3.0f,  6.0f,  9.0f,
        12.0f, 15.0f, 18.0f,
    };

    std::vector<float> expectedOutputDataWithPadding = {
        6.0f, 8.0f,
    };

    std::vector<float> expectedOutputDataNoPadding = {
        10.5f,
    };
    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Scale and offset should match input - we're just calculating average values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                float qScale = 1.0f,
                                                                int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, -2.0f,  3.0f,  4.0f,
            -1.0f, -2.0f,  3.0f,  4.0f,
             1.0f,  2.0f, -3.0f, -4.0f,
             1.0f,  2.0f, -3.0f, -4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, 3.0f,  4.0f,
             1.0f, 3.0f,  4.0f,
             1.0f, 2.0f, -4.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                               float qScale = 1.0f,
                                                               int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, -2.0f,  3.0f,  4.0f,
            -1.0f, -2.0f,  3.0f,  4.0f,
             1.0f,  2.0f, -3.0f, -4.0f,
             1.0f,  2.0f, -3.0f, -4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            -1.0f, 3.0f, 4.0f,  4.0f,
             2.0f, 3.0f, 4.0f,  4.0f,
             2.0f, 3.0f, 4.0f,  4.0f,
             2.0f, 2.0f, 2.0f, -3.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                    float qScale = 1.0f,
                                                                    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            3.0f, 13.0f, 10.0f,
            6.0f, 26.0f, 20.0f,
            3.0f, 13.0f, 10.0f,
        }));
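    // Worked through (explanatory note, not in the original source): with
    // IgnoreValue the divisor stays at the pool size of 4 and padding
    // contributes zeros, so the top-left window holds one real element and
    // three pads: 12 / 4 = 3. The centre window holds four real elements:
    // (20 + 32 + 20 + 32) / 4 = 26.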
    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                             float qScale = 1.0f,
                                                                             int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            2.0f, 3.5f,
            2.0f, 3.5f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                   float qScale = 1.0f,
                                                                   int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
             9.0f, 27.0f, 18.0f, 36.0f,
            18.0f,  9.0f, 18.0f,  9.0f,
            27.0f, 18.0f,  9.0f, 27.0f,
             9.0f, 27.0f,  9.0f, 18.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
             7.0f, 11.0f, 13.0f,  9.0f,
            12.0f, 17.0f, 19.0f, 13.0f,
            12.0f, 16.0f, 16.0f, 10.0f,
             9.0f, 11.0f, 12.0f,  7.0f,
        }));

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                               float qScale = 1.0f,
                                                               int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
             2.0f, 4.0f, 8.0f, 16.0f,
             4.0f, 2.0f, 2.0f,  4.0f,
             8.0f, 2.0f, 4.0f,  2.0f,
            16.0f, 2.0f, 2.0f,  8.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
               1.0f, 4.4721f,   8.0f,
            4.4721f, 2.6457f, 2.236f,
               8.0f, 1.4142f,   4.0f,
        }));
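    // Worked through (explanatory note, not in the original source): the
    // centre value comes from the window covering { 2, 2, 2, 4 }:
    // sqrt((4 + 4 + 4 + 16) / 4) = sqrt(7) which is approximately 2.6457.
    // The corner windows cover a single real element plus three zero pads,
    // e.g. sqrt(2^2 / 4) = 1 at the top left.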
    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                              float qScale = 1.0f,
                                                              int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        }));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset, {
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
        }));
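    // Worked through (explanatory note, not in the original source): the
    // top-left window covers real elements { 1, 2, 1, 2 } plus five zero
    // pads, and IgnoreValue keeps the divisor at the full pool size of 9:
    // sqrt((1 + 4 + 1 + 4) / 9) = sqrt(10 / 9) which is approximately 1.0540.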
    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}