IVGCVSW-2093 Add SpaceToBatchNd layer and corresponding no-op factory implementations
[platform/upstream/armnn.git] / src / backends / backendsCommon / test / Pooling2dTestImpl.hpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6
7 #include "QuantizeHelper.hpp"
8
9 #include <armnn/ArmNN.hpp>
10
11 #include <Permute.hpp>
12
13 #include <backendsCommon/CpuTensorHandle.hpp>
14 #include <backendsCommon/WorkloadFactory.hpp>
15 #include <backendsCommon/WorkloadInfo.hpp>
16
17 #include <test/TensorHelpers.hpp>
18
19 #include <boost/numeric/conversion/cast.hpp>
20
21 #include <algorithm>
22 #include <string>
23
// Runs a single Pooling2d workload built from 'descriptor' on 'workloadFactory'
// and returns the backend output alongside the expected reference output.
//
// Tensor shapes are derived from the supplied boost multi-arrays, interpreting
// their axes according to descriptor.m_DataLayout (batch is always axis 0).
// If the backend reports the configuration as unsupported, the function
// returns early with result.supported == false instead of executing.
//
// qScale/qOffset are applied to both tensor infos only when T is a quantized type.
template<typename T>
LayerTestResult<T, 4> SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                              armnn::Pooling2dDescriptor descriptor,
                                              float qScale,
                                              int32_t qOffset,
                                              const boost::multi_array<T, 4>& input,
                                              const boost::multi_array<T, 4>& outputExpected)
{
    // Resolve which multi-array axis holds H/W/C for the requested layout.
    const armnn::DataLayoutIndexed dataLayout = descriptor.m_DataLayout;
    auto heightIndex = dataLayout.GetHeightIndex();
    auto widthIndex = dataLayout.GetWidthIndex();
    auto channelsIndex = dataLayout.GetChannelsIndex();

    unsigned int inputHeight     = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
    unsigned int inputWidth      = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
    unsigned int inputChannels   = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
    unsigned int inputBatchSize  = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight    = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
    unsigned int outputWidth     = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
    unsigned int outputChannels  = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    // Build tensor infos with the dimensions ordered for the chosen layout.
    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
                                                         inputWidth, dataLayout);
    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
                                                          outputWidth, dataLayout);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    // Re-assert the layout on the queue descriptor's copy of the parameters.
    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Allocate backing memory only after the support check passed.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // The caller compares result.output against result.outputExpected.
    result.outputExpected = outputExpected;

    return result;
}
101
//
// Tests max pooling with the following parameters:
//
//   Pooling size: 3x3
//   Stride:       (2,4)
//   input size:   8x13 (width x height)
//   channels:     2
//   batch size:   2
//
// When forceNoPadding is true the pooling runs unpadded (3x3 output per
// channel); otherwise left/right are padded by 3 (6x3 output per channel).
//
template<typename T>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                                   bool forceNoPadding,
                                                                   float qScale = 1.0f,
                                                                   int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    // forceNoPadding is mainly used for compatibility with ARM Compute.
    // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 8;
    unsigned int inputHeight = 13;
    // Floor-rounded output dimensions for the pool/stride/pad chosen above:
    // 3x3 when unpadded, 6x3 when padded.
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
    unsigned int channels = 2;
    unsigned int batchSize = 2;

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // One 13x8 channel worth of data; the second channel is derived from it below.
    std::vector<float> singleChannelData({
        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
    });

    // Constructs input data.
    std::vector<float> inputData;
    auto negator = [](float f) { return -f; };

    // First image (two channels where the second channel is the negative of the first one).
    // The negated channel exercises max pooling over all-negative values.
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    // Second image (same as first image).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));

    // These were calculated manually.
    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
    boost::multi_array<T, 4> outputExpected(shape);
    if (forceNoPadding)
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>(qScale, qOffset, {
                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f,

                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f
        }));
    }
    else
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>(qScale, qOffset, {
                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f,

                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f
        }));
    }

    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
230
231 template<typename T>
232 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
233                                                    const armnn::DataLayoutIndexed& dataLayout = armnn::DataLayout::NCHW,
234                                                    float qScale = 1.0f,
235                                                    int32_t qOffset = 0)
236 {
237     armnn::Pooling2dDescriptor descriptor;
238     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
239     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
240     descriptor.m_StrideX = descriptor.m_StrideY = 2;
241     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
242     descriptor.m_DataLayout = dataLayout;
243
244     armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
245     armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
246
247     // Set quantization parameters if the requested type is a quantized type.
248     if(armnn::IsQuantizedType<T>())
249     {
250         inputTensorInfo.SetQuantizationScale(qScale);
251         inputTensorInfo.SetQuantizationOffset(qOffset);
252         outputTensorInfo.SetQuantizationScale(qScale);
253         outputTensorInfo.SetQuantizationOffset(qOffset);
254     }
255
256     std::vector<T> inputData(
257         QuantizedVector<T>(qScale, qOffset, {
258              1.0f,  2.0f,  5.0f,  6.0f,
259              3.0f,  4.0f,  7.0f,  8.0f,
260              9.0f, 10.0f, 13.0f, 14.0f,
261             11.0f, 12.0f, 15.0f, 16.0f,
262
263             17.0f, 18.0f, 21.0f, 22.0f,
264             19.0f, 20.0f, 23.0f, 24.0f,
265             25.0f, 26.0f, 29.0f, 30.0f,
266             27.0f, 28.0f, 31.0f, 32.0f,
267         }));
268
269     std::vector<T> outputData(
270         QuantizedVector<T>(qScale, qOffset, {
271              4.0f,  8.0f,
272             12.0f, 16.0f,
273
274             20.0f, 24.0f,
275             28.0f, 32.0f,
276         }));
277
278     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
279     if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
280     {
281         std::vector<T> tmp(inputData.size());
282         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
283         inputData = tmp;
284
285         std::vector<T> tmp1(outputData.size());
286         armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
287         outputData = tmp1;
288     }
289
290     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
291
292     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
293
294     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
295 }
296
297 template<typename T>
298 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
299                                                        armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
300                                                        float qScale = 1.0f,
301                                                        int32_t qOffset = 0)
302 {
303     armnn::Pooling2dDescriptor descriptor;
304     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
305     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
306     descriptor.m_StrideX = descriptor.m_StrideY = 2;
307     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
308     descriptor.m_DataLayout = dataLayout;
309
310     armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
311     armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
312
313     // Set quantization parameters if the requested type is a quantized type.
314     if(armnn::IsQuantizedType<T>())
315     {
316         inputTensorInfo.SetQuantizationScale(qScale);
317         inputTensorInfo.SetQuantizationOffset(qOffset);
318         outputTensorInfo.SetQuantizationScale(qScale);
319         outputTensorInfo.SetQuantizationOffset(qOffset);
320     }
321
322     std::vector<T> inputData(
323         QuantizedVector<T>(qScale, qOffset, {
324              2.0f,  2.0f,  6.0f,  6.0f,
325              4.0f,  4.0f,  8.0f,  8.0f,
326             10.0f, 12.0f, 14.0f, 16.0f,
327             10.0f, 12.0f, 16.0f, 14.0f,
328
329             18.0f, 20.0f, 24.0f, 22.0f,
330             20.0f, 18.0f, 22.0f, 24.0f,
331             26.0f, 28.0f,  0.0f,  0.0f,
332             26.0f, 28.0f,  0.0f,  0.0f,
333         }));
334
335     std::vector<T> outputData(
336         QuantizedVector<T>(qScale, qOffset, {
337              3.0f,  7.0f,
338             11.0f, 15.0f,
339
340             19.0f, 23.0f,
341             27.0f,  0.0f,
342         }));
343
344     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
345     if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
346     {
347         std::vector<T> tmp(inputData.size());
348         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
349         inputData = tmp;
350
351         std::vector<T> tmp1(outputData.size());
352         armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
353         outputData = tmp1;
354     }
355
356     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
357
358     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
359
360     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
361 }
362
363 template<typename T>
364 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
365                                                              float qScale = 1.0f,
366                                                              int32_t qOffset = 0)
367 {
368     armnn::Pooling2dDescriptor descriptor;
369     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
370     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
371     descriptor.m_StrideX = descriptor.m_StrideY = 5;
372     descriptor.m_PadLeft = 50;
373     descriptor.m_PadRight = 50;
374     descriptor.m_PadTop = 50;
375     descriptor.m_PadBottom = 50;
376     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
377
378     armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType<T>());
379     armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType<T>());
380
381     // Set quantization parameters if the requested type is a quantized type.
382     if(armnn::IsQuantizedType<T>())
383     {
384         inputTensorInfo.SetQuantizationScale(qScale);
385         inputTensorInfo.SetQuantizationOffset(qOffset);
386         outputTensorInfo.SetQuantizationScale(qScale);
387         outputTensorInfo.SetQuantizationOffset(qOffset);
388     }
389
390     std::vector<T> inputVec;
391
392     for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
393     {
394         inputVec.push_back(1);
395     }
396
397     auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
398
399     std::vector<T> outputVec;
400
401     for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
402     {
403         outputVec.push_back(1);
404     }
405
406     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
407
408     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
409 }
410
411 template<typename T>
412 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
413                                                   armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
414                                                   float qScale = 1.0f,
415                                                   int32_t qOffset = 0)
416 {
417     armnn::Pooling2dDescriptor descriptor;
418     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
419     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
420     descriptor.m_StrideX = descriptor.m_StrideY = 2;
421     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
422     descriptor.m_DataLayout = dataLayout;
423
424     armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
425     armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
426
427     std::vector<T> inputData(
428         QuantizedVector<T>(qScale, qOffset, {
429             1.0f, 7.0f, 5.0f, 5.0f,
430             1.0f, 7.0f, 5.0f, 5.0f,
431             3.0f, 3.0f, 1.0f, 1.0f,
432             3.0f, 3.0f, 1.0f, 1.0f,
433
434             1.0f, 7.0f, 0.0f, 0.0f,
435             1.0f, 7.0f, 2.0f, 0.0f,
436             0.0f, 2.0f, 1.0f, 1.0f,
437             0.0f, 0.0f, 1.0f, 1.0f,
438         }));
439
440     std::vector<T> outputData(
441         QuantizedVector<T>(qScale, qOffset, {
442             5.0f, 5.0f,
443             3.0f, 1.0f,
444
445             5.0f, 1.0f,
446             1.0f, 1.0f,
447         }));
448
449     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
450     if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
451     {
452         std::vector<T> tmp(inputData.size());
453         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
454         inputData = tmp;
455
456         std::vector<T> tmp1(outputData.size());
457         armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
458         outputData = tmp1;
459     }
460
461     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
462
463     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
464
465     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
466 }
467
468 template<typename T>
469 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(armnn::IWorkloadFactory& workloadFactory,
470                                                         float qScale = 1.0f,
471                                                         int32_t qOffset = 0)
472 {
473     armnn::Pooling2dDescriptor descriptor;
474     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
475     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
476     descriptor.m_StrideX = descriptor.m_StrideY = 1;
477     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
478
479     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
480     auto input = MakeTensor<T, 4>(inputTensorInfo,
481         QuantizedVector<T>(qScale, qOffset, {
482             2.0f, 1.0f, 5.0f, 2.0f,
483             1.0f, 2.0f, 2.0f, 1.0f,
484             5.0f, 4.0f, 1.0f, 5.0f,
485             2.0f, 1.0f, 5.0f, 2.0f,
486         }));
487
488     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
489     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
490         QuantizedVector<T>(qScale, qOffset, {
491             3.0f, 3.0f,
492             3.0f, 3.0f,
493         }));
494
495     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
496 }
497
498 template<typename T>
499 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(armnn::IWorkloadFactory& workloadFactory,
500                                                         float qScale = 1.0f,
501                                                         int32_t qOffset = 0)
502 {
503     armnn::Pooling2dDescriptor descriptor;
504     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
505     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
506     descriptor.m_StrideX = descriptor.m_StrideY = 3;
507     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
508
509     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
510     auto input = MakeTensor<T, 4>(inputTensorInfo,
511         QuantizedVector<T>(qScale, qOffset, {
512             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
513             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
514             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
515             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
516             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
517             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
518             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
519             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
520             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
521         }));
522
523     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
524     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
525         QuantizedVector<T>(qScale, qOffset, {
526             3.0f, 3.0f, 3.0f,
527             3.0f, 3.0f, 3.0f,
528             3.0f, 3.0f, 3.0f,
529         }));
530
531     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
532 }
533
534 template<typename T>
535 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(armnn::IWorkloadFactory& workloadFactory,
536                                                         float qScale = 1.0f,
537                                                         int32_t qOffset = 0)
538 {
539     armnn::Pooling2dDescriptor descriptor;
540     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
541     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
542     descriptor.m_StrideX = descriptor.m_StrideY = 4;
543     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
544
545     armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
546     auto input = MakeTensor<T, 4>(inputTensorInfo,
547         QuantizedVector<T>(qScale, qOffset, {
548             2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
549             1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
550             5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
551             0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
552             2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
553             1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
554             5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
555         }));
556
557     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
558     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
559         QuantizedVector<T>(qScale, qOffset, {
560             3.0f, 3.0f,
561             3.0f, 3.0f,
562         }));
563
564     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
565 }
566
567 template<typename T>
568 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(armnn::IWorkloadFactory& workloadFactory,
569                                                  float qScale = 1.0f,
570                                                  int32_t qOffset = 0)
571 {
572     armnn::Pooling2dDescriptor descriptor;
573     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
574     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
575     descriptor.m_StrideX = descriptor.m_StrideY = 7;
576     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
577
578     armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
579     auto input = MakeTensor<T, 4>(inputTensorInfo,
580         QuantizedVector<T>(qScale, qOffset, {
581             1.0f, 0.0f, 2.0f, 0.0f,  3.0f, 0.0f, 4.0f,
582             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
583             0.0f, 5.0f, 0.0f, 6.0f,  0.0f, 7.0f, 0.0f,
584             8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
585             0.0f, 5.0f, 0.0f, 2.0f,  0.0f, 1.0f, 1.0f,
586             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
587             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
588         }));
589
590     armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
591     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
592         QuantizedVector<T>(qScale, qOffset, {
593             3.0f,
594         }));
595
596     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
597 }
598
599 template<typename T>
600 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(armnn::IWorkloadFactory& workloadFactory,
601                                                  float qScale = 1.0f,
602                                                  int32_t qOffset = 0)
603 {
604     armnn::Pooling2dDescriptor descriptor;
605     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
606     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
607     descriptor.m_StrideX = descriptor.m_StrideY = 9;
608     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
609
610     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
611     auto input = MakeTensor<T, 4>(inputTensorInfo,
612         QuantizedVector<T>(qScale, qOffset, {
613             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
614             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
615             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
616             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
617             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
618             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
619             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
620             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
621             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
622         }));
623
624     armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
625     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
626         QuantizedVector<T>(qScale, qOffset, {
627             3.0f,
628         }));
629
630     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
631 }
632
633 template<typename T>
634 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
635                                                              float qScale = 1.0f,
636                                                              int32_t qOffset = 0)
637 {
638     armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType<T>());
639     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
640
641     armnn::Pooling2dDescriptor descriptor;
642     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
643     descriptor.m_PoolWidth = 2;
644     descriptor.m_PoolHeight = 3;
645     descriptor.m_StrideX = 2;
646     descriptor.m_StrideY = 1;
647     descriptor.m_PadLeft = 2;
648     descriptor.m_PadRight = 0;
649     descriptor.m_PadTop = 1;
650     descriptor.m_PadBottom = 2;
651     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
652     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
653
654     // Construct input data.
655     auto input = MakeTensor<T, 4>(inputTensorInfo,
656         QuantizedVector<T>(qScale, qOffset, {
657             1.0f, 3.0f, 4.0f,
658         }));
659
660     // These were calculated manually.
661     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
662         QuantizedVector<T>(qScale, qOffset, {
663             0.0f, 3.0f, 0.0f, 3.0f,
664         }));
665
666     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
667 }
668
template<typename T>
LayerTestResult<T, 4> ComparePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                 armnn::IWorkloadFactory& refWorkloadFactory,
                                                 armnn::PoolingAlgorithm poolingType,
                                                 float qScale = 1.0f,
                                                 int32_t qOffset = 0)
{
    // Compares 'workloadFactory' against 'refWorkloadFactory': the same pooling
    // workload is created from both factories, both are fed identical random input,
    // and both outputs are returned (output = tested backend, outputExpected =
    // reference backend) so the caller can compare them. If the tested backend does
    // not support this configuration, 'supported' is set to false and the function
    // returns early without executing anything.
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    // Floor-rounded output dimensions: out = (in + 2*pad + stride - pool) / stride.
    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // NCHW layout: { batch, channels, height, width }.
    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Fixed seed so both workloads (and repeated runs) see identical input data.
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    // The reference workload shares descriptor/info but points at its own handles.
    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    // Both workloads receive the same input values.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
769
770 //
771 // Tests max pooling with the following parameters:
772 //
773 //   Pooling size: 2x2
774 //   Stride:       (2,2)
775 //   input size:   4x4
776 //   channels:     1
777 //   batch size:   1
778 //
779 template<typename T>
780 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(armnn::IWorkloadFactory& workloadFactory,
781                                                                    bool forceNoPadding,
782                                                                    float qScale = 1.0f,
783                                                                    int32_t qOffset = 0)
784 {
785     armnn::Pooling2dDescriptor descriptor;
786     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
787     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
788     descriptor.m_StrideX = 2;
789     descriptor.m_StrideY = 2;
790     descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
791     descriptor.m_PadTop = descriptor.m_PadBottom = 0;
792     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
793     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
794
795     unsigned int inputWidth = 4;
796     unsigned int inputHeight = 4;
797     unsigned int outputWidth =
798         (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
799         descriptor.m_StrideX;
800     unsigned int outputHeight =
801         (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
802         descriptor.m_StrideY;
803     unsigned int channels = 1;
804     unsigned int batchSize = 1;
805
806     std::vector<float> inputData = {
807         510.0f, 222.0f, 780.0f, 654.0f,
808         141.0f, 276.0f,  15.0f, 546.0f,
809         303.0f, 618.0f, 582.0f, 339.0f,
810         438.0f, 564.0f, 573.0f, 402.0f
811     };
812
813     // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
814     std::vector<float> expectedOutputDataWithPadding = {
815         0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
816         0.0f, 438.0f, 618.0f, 402.0f, 0.0f
817     };
818
819     std::vector<float> expectedOutputDataNoPadding = {
820         510.0f, 780.0f,
821         618.0f, 582.0f
822     };
823
824     armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
825
826     // Scale and offset should match input - we're just calculating maximum values.
827     armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
828
829     // Set quantization parameters if the requested type is a quantized type.
830     if(armnn::IsQuantizedType<T>())
831     {
832         inputTensorInfo.SetQuantizationScale(qScale);
833         inputTensorInfo.SetQuantizationOffset(qOffset);
834         outputTensorInfo.SetQuantizationScale(qScale);
835         outputTensorInfo.SetQuantizationOffset(qOffset);
836     }
837
838     auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
839
840     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
841         forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
842                          QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
843
844     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
845 }
846
847 //
848 // Tests max pooling with the following parameters:
849 //
850 //   Pooling size: 3x2
851 //   Stride:       (2,2)
852 //   input size:   3x2
853 //   channels:     1
854 //   batch size:   1
855 //
856 template<typename T>
857 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
858         armnn::IWorkloadFactory& workloadFactory,
859         bool forceNoPadding,
860         float qScale = 1.0f,
861         int32_t qOffset = 0)
862 {
863     armnn::Pooling2dDescriptor descriptor;
864     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
865     descriptor.m_PoolWidth = 3;
866     descriptor.m_PoolHeight = 2;
867     descriptor.m_StrideX = 2;
868     descriptor.m_StrideY = 2;
869     descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
870     descriptor.m_PadRight = descriptor.m_PadLeft;
871     descriptor.m_PadTop = 0;
872     descriptor.m_PadBottom = 0;
873     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
874     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
875
876     unsigned int inputWidth = 3;
877     unsigned int inputHeight = 2;
878     unsigned int outputWidth =
879         (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
880         descriptor.m_StrideX;
881     unsigned int outputHeight =
882         (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
883         descriptor.m_StrideY;
884     unsigned int channels = 1;
885     unsigned int batchSize = 1;
886
887     std::vector<float> inputData = {
888         3.0f, 6.0f, 9.0f,
889         12.0f, 15.0f, 18.0f,
890     };
891
892     std::vector<float> expectedOutputDataWithPadding = {
893         6.0f, 8.0f,
894     };
895
896     std::vector<float> expectedOutputDataNoPadding = {
897         10.5f,
898     };
899
900     armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
901
902     // Scale and offset should match input - we're just calculating average values.
903     armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
904
905     // Set quantization parameters if the requested type is a quantized type.
906     if(armnn::IsQuantizedType<T>())
907     {
908         inputTensorInfo.SetQuantizationScale(qScale);
909         inputTensorInfo.SetQuantizationOffset(qOffset);
910         outputTensorInfo.SetQuantizationScale(qScale);
911         outputTensorInfo.SetQuantizationOffset(qOffset);
912     }
913
914     auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
915
916     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
917         forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
918                          QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
919
920     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
921 }
922
923
924 template<typename T>
925 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
926                                                             float qScale = 1.0f,
927                                                             int32_t qOffset = 0)
928 {
929     armnn::Pooling2dDescriptor descriptor;
930     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
931     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
932     descriptor.m_StrideX = descriptor.m_StrideY = 2;
933     descriptor.m_PadLeft = 1;
934     descriptor.m_PadRight = 1;
935     descriptor.m_PadTop = 1;
936     descriptor.m_PadBottom = 1;
937     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
938
939     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
940     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
941
942     // Set quantization parameters if the requested type is a quantized type.
943     if(armnn::IsQuantizedType<T>())
944     {
945         inputTensorInfo.SetQuantizationScale(qScale);
946         inputTensorInfo.SetQuantizationOffset(qOffset);
947         outputTensorInfo.SetQuantizationScale(qScale);
948         outputTensorInfo.SetQuantizationOffset(qOffset);
949     }
950
951     auto input = MakeTensor<T, 4>(inputTensorInfo,
952         QuantizedVector<T>(qScale, qOffset, {
953             -1.0f, -2.0f,  3.0f,  4.0f,
954             -1.0f, -2.0f,  3.0f,  4.0f,
955              1.0f,  2.0f, -3.0f, -4.0f,
956              1.0f,  2.0f, -3.0f, -4.0f,
957         }));
958
959     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
960         QuantizedVector<T>(qScale, qOffset, {
961             -1.0f,  3.0f,  4.0f,
962              1.0f,  3.0f,  4.0f,
963              1.0f,  2.0f, -4.0f,
964         }));
965
966     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
967 }
968
969 template<typename T>
970 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
971                                                             float qScale = 1.0f,
972                                                             int32_t qOffset = 0)
973 {
974     armnn::Pooling2dDescriptor descriptor;
975     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
976     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
977     descriptor.m_StrideX = descriptor.m_StrideY = 1;
978     descriptor.m_PadLeft = 1;
979     descriptor.m_PadRight = 1;
980     descriptor.m_PadTop = 1;
981     descriptor.m_PadBottom = 1;
982     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
983
984     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
985     armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
986
987     // Set quantization parameters if the requested type is a quantized type.
988     if(armnn::IsQuantizedType<T>())
989     {
990         inputTensorInfo.SetQuantizationScale(qScale);
991         inputTensorInfo.SetQuantizationOffset(qOffset);
992         outputTensorInfo.SetQuantizationScale(qScale);
993         outputTensorInfo.SetQuantizationOffset(qOffset);
994     }
995
996     auto input = MakeTensor<T, 4>(inputTensorInfo,
997         QuantizedVector<T>(qScale, qOffset, {
998             -1.0f, -2.0f,  3.0f,  4.0f,
999             -1.0f, -2.0f,  3.0f,  4.0f,
1000              1.0f,  2.0f, -3.0f, -4.0f,
1001              1.0f,  2.0f, -3.0f, -4.0f,
1002         }));
1003
1004     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1005         QuantizedVector<T>(qScale, qOffset, {
1006             -1.0f,  3.0f,  4.0f,  4.0f,
1007              2.0f,  3.0f,  4.0f,  4.0f,
1008              2.0f,  3.0f,  4.0f,  4.0f,
1009              2.0f,  2.0f,  2.0f, -3.0f,
1010         }));
1011
1012     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1013 }
1014
1015 template<typename T>
1016 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
1017                                                                  float qScale = 1.0f,
1018                                                                  int32_t qOffset = 0)
1019 {
1020     armnn::Pooling2dDescriptor descriptor;
1021     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1022     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1023     descriptor.m_StrideX = descriptor.m_StrideY = 2;
1024     descriptor.m_PadLeft = 1;
1025     descriptor.m_PadRight = 1;
1026     descriptor.m_PadTop = 1;
1027     descriptor.m_PadBottom = 1;
1028     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1029
1030     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1031     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
1032
1033     // Set quantization parameters if the requested type is a quantized type.
1034     if(armnn::IsQuantizedType<T>())
1035     {
1036         inputTensorInfo.SetQuantizationScale(qScale);
1037         inputTensorInfo.SetQuantizationOffset(qOffset);
1038         outputTensorInfo.SetQuantizationScale(qScale);
1039         outputTensorInfo.SetQuantizationOffset(qOffset);
1040     }
1041
1042     auto input = MakeTensor<T, 4>(inputTensorInfo,
1043         QuantizedVector<T>(qScale, qOffset, {
1044             12.0f, 20.0f, 32.0f, 40.0f,
1045             12.0f, 20.0f, 32.0f, 40.0f,
1046             12.0f, 20.0f, 32.0f, 40.0f,
1047             12.0f, 20.0f, 32.0f, 40.0f,
1048         }));
1049
1050     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1051         QuantizedVector<T>(qScale, qOffset, {
1052             3.0f,  13.0f,  10.0f,
1053             6.0f,  26.0f,  20.0f,
1054             3.0f,  13.0f,  10.0f,
1055         }));
1056
1057     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1058 }
1059
1060 template<typename T>
1061 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
1062                                                                  float qScale = 1.0f,
1063                                                                  int32_t qOffset = 0)
1064 {
1065     armnn::Pooling2dDescriptor descriptor;
1066     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1067     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1068     descriptor.m_StrideX = descriptor.m_StrideY = 2;
1069     descriptor.m_PadLeft = 0;
1070     descriptor.m_PadRight = 0;
1071     descriptor.m_PadTop = 0;
1072     descriptor.m_PadBottom = 0;
1073     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1074     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
1075
1076     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, armnn::GetDataType<T>());
1077     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
1078
1079     // Set quantization parameters if the requested type is a quantized type.
1080     if(armnn::IsQuantizedType<T>())
1081     {
1082         inputTensorInfo.SetQuantizationScale(qScale);
1083         inputTensorInfo.SetQuantizationOffset(qOffset);
1084         outputTensorInfo.SetQuantizationScale(qScale);
1085         outputTensorInfo.SetQuantizationOffset(qOffset);
1086     }
1087
1088     auto input = MakeTensor<T, 4>(inputTensorInfo,
1089         QuantizedVector<T>(qScale, qOffset, {
1090             1.0f, 2.0f, 3.0f, 4.0f,
1091             1.0f, 2.0f, 3.0f, 4.0f,
1092             1.0f, 2.0f, 3.0f, 4.0f,
1093             1.0f, 2.0f, 3.0f, 4.0f,
1094         }));
1095
1096     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1097         QuantizedVector<T>(qScale, qOffset, {
1098             2.0f, 3.5f,
1099             2.0f, 3.5f
1100         }));
1101
1102     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1103 }
1104
1105 template<typename T>
1106 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
1107                                                                 float qScale = 1.0f,
1108                                                                 int32_t qOffset = 0)
1109 {
1110     armnn::Pooling2dDescriptor descriptor;
1111     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1112     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1113     descriptor.m_StrideX = descriptor.m_StrideY = 1;
1114     descriptor.m_PadLeft = 1;
1115     descriptor.m_PadRight = 1;
1116     descriptor.m_PadTop = 1;
1117     descriptor.m_PadBottom = 1;
1118     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1119
1120     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1121     armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1122
1123     // Set quantization parameters if the requested type is a quantized type.
1124     if(armnn::IsQuantizedType<T>())
1125     {
1126         inputTensorInfo.SetQuantizationScale(qScale);
1127         inputTensorInfo.SetQuantizationOffset(qOffset);
1128         outputTensorInfo.SetQuantizationScale(qScale);
1129         outputTensorInfo.SetQuantizationOffset(qOffset);
1130     }
1131
1132     auto input = MakeTensor<T, 4>(inputTensorInfo,
1133         QuantizedVector<T>(qScale, qOffset, {
1134             9.0f,   27.0f,  18.0f,  36.0f,
1135             18.0f,   9.0f,  18.0f,   9.0f,
1136             27.0f,  18.0f,   9.0f,  27.0f,
1137             9.0f,   27.0f,   9.0f,  18.0f,
1138         }));
1139
1140     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1141         QuantizedVector<T>(qScale, qOffset, {
1142              7.0f,  11.0f,  13.0f, 9.0f,
1143             12.0f,  17.0f,  19.0f, 13.0f,
1144             12.0f,  16.0f,  16.0f, 10.0f,
1145              9.0f,  11.0f,  12.0f, 7.0f,
1146         }));
1147
1148     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1149 }
1150
1151 template<typename T>
1152 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
1153                                                             float qScale = 1.0f,
1154                                                             int32_t qOffset = 0)
1155 {
1156     armnn::Pooling2dDescriptor descriptor;
1157     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1158     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1159     descriptor.m_StrideX = descriptor.m_StrideY = 2;
1160     descriptor.m_PadLeft = 1;
1161     descriptor.m_PadRight = 1;
1162     descriptor.m_PadTop = 1;
1163     descriptor.m_PadBottom = 1;
1164     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1165
1166     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1167     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
1168
1169     // Set quantization parameters if the requested type is a quantized type.
1170     if(armnn::IsQuantizedType<T>())
1171     {
1172         inputTensorInfo.SetQuantizationScale(qScale);
1173         inputTensorInfo.SetQuantizationOffset(qOffset);
1174         outputTensorInfo.SetQuantizationScale(qScale);
1175         outputTensorInfo.SetQuantizationOffset(qOffset);
1176     }
1177
1178     auto input = MakeTensor<T, 4>(inputTensorInfo,
1179         QuantizedVector<T>(qScale, qOffset, {
1180             2.0f,  4.0f, 8.0f, 16.0f,
1181             4.0f,  2.0f, 2.0f, 4.0f,
1182             8.0f,  2.0f, 4.0f, 2.0f,
1183             16.0f, 2.0f, 2.0f, 8.0f,
1184         }));
1185
1186     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1187         QuantizedVector<T>(qScale, qOffset, {
1188                1.0f,     4.4721f,   8.0f,
1189             4.4721f,     2.6457f,   2.236f,
1190                8.0f,     1.4142f,   4.0f,
1191         }));
1192
1193     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1194 }
1195
1196 template<typename T>
1197 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
1198                                                            float qScale = 1.0f,
1199                                                            int32_t qOffset = 0)
1200 {
1201     armnn::Pooling2dDescriptor descriptor;
1202     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1203     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1204     descriptor.m_StrideX = descriptor.m_StrideY = 1;
1205     descriptor.m_PadLeft = 1;
1206     descriptor.m_PadRight = 1;
1207     descriptor.m_PadTop = 1;
1208     descriptor.m_PadBottom = 1;
1209     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1210
1211     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1212     armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1213
1214     // Set quantization parameters if the requested type is a quantized type.
1215     if(armnn::IsQuantizedType<T>())
1216     {
1217         inputTensorInfo.SetQuantizationScale(qScale);
1218         inputTensorInfo.SetQuantizationOffset(qOffset);
1219         outputTensorInfo.SetQuantizationScale(qScale);
1220         outputTensorInfo.SetQuantizationOffset(qOffset);
1221     }
1222
1223     auto input = MakeTensor<T, 4>(inputTensorInfo,
1224         QuantizedVector<T>(qScale, qOffset, {
1225             1.0f, 2.0f, 3.0f, 4.0f,
1226             1.0f, 2.0f, 3.0f, 4.0f,
1227             1.0f, 2.0f, 3.0f, 4.0f,
1228             1.0f, 2.0f, 3.0f, 4.0f,
1229         }));
1230
1231     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1232         QuantizedVector<T>(qScale, qOffset, {
1233             1.0540f, 1.7638f, 2.5385f, 2.3570f,
1234             1.2909f, 2.1602f, 3.1091f, 2.8867f,
1235             1.2909f, 2.1602f, 3.1091f, 2.8867f,
1236             1.0540f, 1.7638f, 2.5385f, 2.3570f,
1237         }));
1238
1239     return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
1240 }