IVGCVSW-4246 Clean build of LayerTests with -Wextra
[platform/upstream/armnn.git] / src / backends / backendsCommon / test / layerTests / Pooling2dTestImpl.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "Pooling2dTestImpl.hpp"
7
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
10
11 #include <armnn/LayerSupport.hpp>
12
13 #include <armnnUtils/TensorUtils.hpp>
14 #include <armnnUtils/DataLayoutIndexed.hpp>
15 #include <armnnUtils/Permute.hpp>
16
17 #include <backendsCommon/WorkloadInfo.hpp>
18
19 #include <backendsCommon/test/TensorCopyUtils.hpp>
20 #include <backendsCommon/test/WorkloadTestUtils.hpp>
21
22 #include <test/TensorHelpers.hpp>
23
24 #include <boost/numeric/conversion/cast.hpp>
25
26 namespace
27 {
28
29 using namespace armnnUtils;
30
//
// Shared driver for every pooling test in this file. Builds a Pooling2d
// workload from the given descriptor, runs it on the supplied input tensor,
// and returns the computed output alongside the caller-provided expected
// output. Input/output shapes are derived from the multi_arrays themselves,
// using the descriptor's data layout to decide which dimension is which.
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::Pooling2dDescriptor descriptor,
    float qScale,
    int32_t qOffset,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& outputExpected)
{
    boost::ignore_unused(memoryManager);
    // The descriptor's layout (NCHW/NHWC) determines which axes of the
    // 4-D multi_arrays are height, width and channels.
    const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
    const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
    auto heightIndex = dimensionIndices.GetHeightIndex();
    auto widthIndex = dimensionIndices.GetWidthIndex();
    auto channelsIndex = dimensionIndices.GetChannelsIndex();

    // Batch is dimension 0 in both layouts.
    unsigned int inputHeight     = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
    unsigned int inputWidth      = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
    unsigned int inputChannels   = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
    unsigned int inputBatchSize  = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight    = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
    unsigned int outputWidth     = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
    unsigned int outputChannels  = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo(
        inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
        outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The queue descriptor carries a copy of the caller's descriptor;
    // re-assert the data layout on the copy.
    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        // Early return: result.output is never populated in this case.
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Backing memory must be allocated before data can be copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}
113
114 //
115 // Tests max pooling with the following parameters:
116 //
117 //   Pooling size: 3x3
118 //   Stride:       (2,4)
119 //   input size:   8x13
120 //   channels:     2
121 //   batch size:   2
122 //
123 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
124 LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
125     armnn::IWorkloadFactory& workloadFactory,
126     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
127     bool forceNoPadding,
128     float qScale = 1.0f,
129     int32_t qOffset = 0)
130 {
131     armnn::Pooling2dDescriptor descriptor;
132     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
133     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
134     descriptor.m_StrideX = 2;
135     descriptor.m_StrideY = 4;
136     // forceNoPadding is mainly used for compatibility with ARM Compute.
137     // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
138     descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
139     descriptor.m_PadTop = descriptor.m_PadBottom = 0;
140     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
141     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
142
143     unsigned int inputWidth = 8;
144     unsigned int inputHeight = 13;
145     unsigned int outputWidth =
146         (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
147         descriptor.m_StrideX;
148     unsigned int outputHeight =
149         (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
150         descriptor.m_StrideY;
151     unsigned int channels = 2;
152     unsigned int batchSize = 2;
153
154     armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
155     armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
156
157     // Set quantization parameters if the requested type is a quantized type.
158     if(armnn::IsQuantizedType<T>())
159     {
160         inputTensorInfo.SetQuantizationScale(qScale);
161         inputTensorInfo.SetQuantizationOffset(qOffset);
162         outputTensorInfo.SetQuantizationScale(qScale);
163         outputTensorInfo.SetQuantizationOffset(qOffset);
164     }
165
166     std::vector<float> singleChannelData({
167         0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
168         1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
169         8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
170         8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
171         5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
172         1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
173         9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
174         1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
175         6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
176         8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
177         7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
178         4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
179         3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
180     });
181
182     // Constructs input data.
183     std::vector<float> inputData;
184     auto negator = [](float f) { return -f; };
185
186     // First image (two channels where the second channel is the negative of the first one).
187     inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
188     std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
189
190     // Second image (same as first image).
191     inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
192     std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
193
194     auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
195
196     // These were calculated manually.
197     auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
198     boost::multi_array<T, 4> outputExpected(shape);
199     if (forceNoPadding)
200     {
201         outputExpected = MakeTensor<T, 4>(outputTensorInfo,
202             QuantizedVector<T>({
203                  8.0f,  8.0f,  8.0f,
204                  9.0f,  7.0f,  9.0f,
205                  9.0f,  9.0f,  9.0f,
206
207                  0.0f,  0.0f, -3.0f,
208                 -1.0f,  0.0f,  0.0f,
209                 -1.0f, -1.0f, -1.0f,
210
211                  8.0f,  8.0f,  8.0f,
212                  9.0f,  7.0f,  9.0f,
213                  9.0f,  9.0f,  9.0f,
214
215                  0.0f,  0.0f, -3.0f,
216                 -1.0f,  0.0f,  0.0f,
217                 -1.0f, -1.0f, -1.0f
218             },
219             qScale, qOffset));
220     }
221     else
222     {
223         outputExpected = MakeTensor<T, 4>(outputTensorInfo,
224             QuantizedVector<T>({
225                 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
226                 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
227                 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
228
229                 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
230                 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
231                 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
232
233                 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
234                 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
235                 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
236
237                 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
238                 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
239                 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
240             },
241             qScale, qOffset));
242     }
243
244     return SimplePooling2dTestImpl<ArmnnType>(
245         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
246 }
247
248 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
249 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
250     armnn::IWorkloadFactory& workloadFactory,
251     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
252     const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
253     float qScale = 1.0f,
254     int32_t qOffset = 0)
255 {
256     armnn::Pooling2dDescriptor descriptor;
257     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
258     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
259     descriptor.m_StrideX = descriptor.m_StrideY = 2;
260     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
261     descriptor.m_DataLayout = dataLayout;
262
263     armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
264     armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
265
266     // Set quantization parameters if the requested type is a quantized type.
267     if(armnn::IsQuantizedType<T>())
268     {
269         inputTensorInfo.SetQuantizationScale(qScale);
270         inputTensorInfo.SetQuantizationOffset(qOffset);
271         outputTensorInfo.SetQuantizationScale(qScale);
272         outputTensorInfo.SetQuantizationOffset(qOffset);
273     }
274
275     std::vector<T> inputData(
276         QuantizedVector<T>({
277              1.0f,  2.0f,  5.0f,  6.0f,
278              3.0f,  4.0f,  7.0f,  8.0f,
279              9.0f, 10.0f, 13.0f, 14.0f,
280             11.0f, 12.0f, 15.0f, 16.0f,
281
282             17.0f, 18.0f, 21.0f, 22.0f,
283             19.0f, 20.0f, 23.0f, 24.0f,
284             25.0f, 26.0f, 29.0f, 30.0f,
285             27.0f, 28.0f, 31.0f, 32.0f,
286         },
287         qScale, qOffset));
288
289     std::vector<T> outputData(
290         QuantizedVector<T>({
291              4.0f,  8.0f,
292             12.0f, 16.0f,
293
294             20.0f, 24.0f,
295             28.0f, 32.0f,
296         },
297         qScale, qOffset));
298
299     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
300     if (dataLayout == armnn::DataLayout::NHWC)
301     {
302         std::vector<T> tmp(inputData.size());
303         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
304         inputData = tmp;
305
306         std::vector<T> tmp1(outputData.size());
307         armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
308         outputData = tmp1;
309     }
310
311     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
312
313     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
314
315     return SimplePooling2dTestImpl<ArmnnType>(
316         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
317 }
318
319 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
320 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
321     armnn::IWorkloadFactory& workloadFactory,
322     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
323     armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
324     float qScale = 1.0f,
325     int32_t qOffset = 0)
326 {
327     armnn::Pooling2dDescriptor descriptor;
328     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
329     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
330     descriptor.m_StrideX = descriptor.m_StrideY = 2;
331     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
332     descriptor.m_DataLayout = dataLayout;
333
334     armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
335     armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
336
337     // Set quantization parameters if the requested type is a quantized type.
338     if(armnn::IsQuantizedType<T>())
339     {
340         inputTensorInfo.SetQuantizationScale(qScale);
341         inputTensorInfo.SetQuantizationOffset(qOffset);
342         outputTensorInfo.SetQuantizationScale(qScale);
343         outputTensorInfo.SetQuantizationOffset(qOffset);
344     }
345
346     std::vector<T> inputData(
347         QuantizedVector<T>({
348              2.0f,  2.0f,  6.0f,  6.0f,
349              4.0f,  4.0f,  8.0f,  8.0f,
350             10.0f, 12.0f, 14.0f, 16.0f,
351             10.0f, 12.0f, 16.0f, 14.0f,
352
353             18.0f, 20.0f, 24.0f, 22.0f,
354             20.0f, 18.0f, 22.0f, 24.0f,
355             26.0f, 28.0f,  0.0f,  0.0f,
356             26.0f, 28.0f,  0.0f,  0.0f,
357         },
358         qScale, qOffset));
359
360     std::vector<T> outputData(
361         QuantizedVector<T>({
362              3.0f,  7.0f,
363             11.0f, 15.0f,
364
365             19.0f, 23.0f,
366             27.0f,  0.0f,
367         },
368         qScale, qOffset));
369
370     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
371     if (dataLayout == armnn::DataLayout::NHWC)
372     {
373         std::vector<T> tmp(inputData.size());
374         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
375         inputData = tmp;
376
377         std::vector<T> tmp1(outputData.size());
378         armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
379         outputData = tmp1;
380     }
381
382     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
383
384     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
385
386     return SimplePooling2dTestImpl<ArmnnType>(
387         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
388 }
389
390 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
391 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
392     armnn::IWorkloadFactory& workloadFactory,
393     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
394     float qScale = 1.0f,
395     int32_t qOffset = 0)
396 {
397     armnn::Pooling2dDescriptor descriptor;
398     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
399     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
400     descriptor.m_StrideX = descriptor.m_StrideY = 5;
401     descriptor.m_PadLeft = 50;
402     descriptor.m_PadRight = 50;
403     descriptor.m_PadTop = 50;
404     descriptor.m_PadBottom = 50;
405     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
406
407     armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
408     armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
409
410     // Set quantization parameters if the requested type is a quantized type.
411     if(armnn::IsQuantizedType<T>())
412     {
413         inputTensorInfo.SetQuantizationScale(qScale);
414         inputTensorInfo.SetQuantizationOffset(qOffset);
415         outputTensorInfo.SetQuantizationScale(qScale);
416         outputTensorInfo.SetQuantizationOffset(qOffset);
417     }
418
419     std::vector<T> inputVec;
420
421     for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
422     {
423         inputVec.push_back(1);
424     }
425
426     auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
427
428     std::vector<T> outputVec;
429
430     for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
431     {
432         outputVec.push_back(1);
433     }
434
435     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
436
437     return SimplePooling2dTestImpl<ArmnnType>(
438         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
439 }
440
// 2x2 L2 pooling with stride 2 over a 1x2x4x4 input, in NCHW or NHWC.
//
// NOTE(review): unlike SimpleMaxPooling2dTestCommon and
// SimpleAveragePooling2dTestCommon, this function never calls
// SetQuantizationScale/SetQuantizationOffset on the tensor infos even though
// the data is quantized with qScale/qOffset below — TODO confirm whether
// that omission is intentional for the quantized variants of this test.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    std::vector<T> inputData(
        QuantizedVector<T>({
            1.0f, 7.0f, 5.0f, 5.0f,
            1.0f, 7.0f, 5.0f, 5.0f,
            3.0f, 3.0f, 1.0f, 1.0f,
            3.0f, 3.0f, 1.0f, 1.0f,

            1.0f, 7.0f, 0.0f, 0.0f,
            1.0f, 7.0f, 2.0f, 0.0f,
            0.0f, 2.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 1.0f, 1.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
            5.0f, 5.0f,
            3.0f, 1.0f,

            5.0f, 1.0f,
            1.0f, 1.0f,
        },
        qScale, qOffset));

    // The literals above are laid out NCHW; re-order them when running NHWC.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
502
503 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
504 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
505     armnn::IWorkloadFactory& workloadFactory,
506     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
507     float qScale = 1.0f,
508     int32_t qOffset = 0)
509 {
510     armnn::Pooling2dDescriptor descriptor;
511     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
512     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
513     descriptor.m_StrideX = descriptor.m_StrideY = 1;
514     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
515
516     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
517     auto input = MakeTensor<T, 4>(inputTensorInfo,
518         QuantizedVector<T>({
519             2.0f, 1.0f, 5.0f, 2.0f,
520             1.0f, 2.0f, 2.0f, 1.0f,
521             5.0f, 4.0f, 1.0f, 5.0f,
522             2.0f, 1.0f, 5.0f, 2.0f,
523         },
524         qScale, qOffset));
525
526     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
527     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
528         QuantizedVector<T>({
529             3.0f, 3.0f,
530             3.0f, 3.0f,
531         },
532         qScale, qOffset));
533
534     return SimplePooling2dTestImpl<ArmnnType>(
535         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
536 }
537
538 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
539 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
540     armnn::IWorkloadFactory& workloadFactory,
541     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
542     float qScale = 1.0f,
543     int32_t qOffset = 0)
544 {
545     armnn::Pooling2dDescriptor descriptor;
546     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
547     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
548     descriptor.m_StrideX = descriptor.m_StrideY = 3;
549     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
550
551     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
552     auto input = MakeTensor<T, 4>(inputTensorInfo,
553         QuantizedVector<T>({
554             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
555             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
556             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
557             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
558             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
559             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
560             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
561             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
562             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
563         },
564         qScale, qOffset));
565
566     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
567     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
568         QuantizedVector<T>({
569             3.0f, 3.0f, 3.0f,
570             3.0f, 3.0f, 3.0f,
571             3.0f, 3.0f, 3.0f,
572         },
573         qScale, qOffset));
574
575     return SimplePooling2dTestImpl<ArmnnType>(
576         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
577 }
578
579 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
580 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
581     armnn::IWorkloadFactory& workloadFactory,
582     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
583     float qScale = 1.0f,
584     int32_t qOffset = 0)
585 {
586     armnn::Pooling2dDescriptor descriptor;
587     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
588     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
589     descriptor.m_StrideX = descriptor.m_StrideY = 4;
590     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
591
592     armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
593     auto input = MakeTensor<T, 4>(inputTensorInfo,
594         QuantizedVector<T>({
595             2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
596             1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
597             5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
598             0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
599             2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
600             1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
601             5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
602         },
603         qScale, qOffset));
604
605     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
606     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
607         QuantizedVector<T>({
608             3.0f, 3.0f,
609             3.0f, 3.0f,
610         },
611         qScale, qOffset));
612
613     return SimplePooling2dTestImpl<ArmnnType>(
614         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
615 }
616
617 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
618 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
619     armnn::IWorkloadFactory& workloadFactory,
620     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
621     float qScale = 1.0f,
622     int32_t qOffset = 0)
623 {
624     armnn::Pooling2dDescriptor descriptor;
625     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
626     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
627     descriptor.m_StrideX = descriptor.m_StrideY = 7;
628     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
629
630     armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
631     auto input = MakeTensor<T, 4>(inputTensorInfo,
632         QuantizedVector<T>({
633             1.0f, 0.0f, 2.0f, 0.0f,  3.0f, 0.0f, 4.0f,
634             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
635             0.0f, 5.0f, 0.0f, 6.0f,  0.0f, 7.0f, 0.0f,
636             8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
637             0.0f, 5.0f, 0.0f, 2.0f,  0.0f, 1.0f, 1.0f,
638             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
639             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
640         },
641         qScale, qOffset));
642
643     armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
644     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
645         QuantizedVector<T>({
646             3.0f,
647         },
648         qScale, qOffset));
649
650     return SimplePooling2dTestImpl<ArmnnType>(
651         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
652 }
653
654 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
655 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
656     armnn::IWorkloadFactory& workloadFactory,
657     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
658     float qScale = 1.0f,
659     int32_t qOffset = 0)
660 {
661     armnn::Pooling2dDescriptor descriptor;
662     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
663     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
664     descriptor.m_StrideX = descriptor.m_StrideY = 9;
665     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
666
667     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
668     auto input = MakeTensor<T, 4>(inputTensorInfo,
669         QuantizedVector<T>({
670             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
671             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
672             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
673             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
674             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
675             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
676             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
677             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
678             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
679         },
680         qScale, qOffset));
681
682     armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
683     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
684         QuantizedVector<T>({
685             3.0f,
686         },
687         qScale, qOffset));
688
689     return SimplePooling2dTestImpl<ArmnnType>(
690         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
691 }
692
693 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
694 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
695     armnn::IWorkloadFactory& workloadFactory,
696     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
697     float qScale = 1.0f,
698     int32_t qOffset = 0)
699 {
700     armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
701     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
702
703     armnn::Pooling2dDescriptor descriptor;
704     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
705     descriptor.m_PoolWidth = 2;
706     descriptor.m_PoolHeight = 3;
707     descriptor.m_StrideX = 2;
708     descriptor.m_StrideY = 1;
709     descriptor.m_PadLeft = 2;
710     descriptor.m_PadRight = 0;
711     descriptor.m_PadTop = 1;
712     descriptor.m_PadBottom = 2;
713     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
714     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
715
716     // Construct input data.
717     auto input = MakeTensor<T, 4>(inputTensorInfo,
718         QuantizedVector<T>({
719             1.0f, 3.0f, 4.0f,
720         },
721         qScale, qOffset));
722
723     // These were calculated manually.
724     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
725         QuantizedVector<T>({
726             0.0f, 3.0f, 0.0f, 3.0f,
727         },
728         qScale, qOffset));
729
730     return SimplePooling2dTestImpl<ArmnnType>(
731         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
732 }
733
// Runs an identical pooling workload on the backend under test (workloadFactory)
// and on a reference backend (refWorkloadFactory), returning both outputs in a
// LayerTestResult so the caller can compare them. If the backend under test does
// not support the configuration, 'supported' is set to false and the function
// returns early without executing anything.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    // Part of the common layer-test signature; not needed by this test.
    boost::ignore_unused(memoryManager);
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    // Output extents for Floor output-shape rounding (matches m_OutputShapeRounding below).
    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Fixed seed so both backends consume identical, reproducible input data.
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    // The reference workload shares the same descriptor but binds its own tensor handles.
    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    // Feed the same input data to both backends.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // 'output' holds the backend-under-test result, 'outputExpected' the reference result.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
837
838 //
839 // Tests max pooling with the following parameters:
840 //
841 //   Pooling size: 2x2
842 //   Stride:       (2,2)
843 //   input size:   4x4
844 //   channels:     1
845 //   batch size:   1
846 //
847 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
848 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
849     armnn::IWorkloadFactory& workloadFactory,
850     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
851     bool forceNoPadding,
852     float qScale = 1.0f,
853     int32_t qOffset = 0)
854 {
855     armnn::Pooling2dDescriptor descriptor;
856     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
857     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
858     descriptor.m_StrideX = 2;
859     descriptor.m_StrideY = 2;
860     descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
861     descriptor.m_PadTop = descriptor.m_PadBottom = 0;
862     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
863     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
864
865
866     unsigned int inputWidth = 4;
867
868     unsigned int inputHeight = 4;
869
870     unsigned int outputWidth =
871         (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
872         descriptor.m_StrideX;
873     unsigned int outputHeight =
874         (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
875         descriptor.m_StrideY;
876     unsigned int channels = 1;
877     unsigned int batchSize = 1;
878
879     std::vector<float> inputData = {
880         510.0f, 222.0f, 780.0f, 654.0f,
881         141.0f, 276.0f,  15.0f, 546.0f,
882         303.0f, 618.0f, 582.0f, 339.0f,
883         438.0f, 564.0f, 573.0f, 402.0f
884     };
885
886     // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
887     std::vector<float> expectedOutputDataWithPadding = {
888         0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
889         0.0f, 438.0f, 618.0f, 402.0f, 0.0f
890     };
891
892     std::vector<float> expectedOutputDataNoPadding = {
893         510.0f, 780.0f,
894         618.0f, 582.0f
895     };
896
897     armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
898
899     // Scale and offset should match input - we're just calculating maximum values.
900     armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
901
902     // Set quantization parameters if the requested type is a quantized type.
903     if(armnn::IsQuantizedType<T>())
904     {
905         inputTensorInfo.SetQuantizationScale(qScale);
906         inputTensorInfo.SetQuantizationOffset(qOffset);
907         outputTensorInfo.SetQuantizationScale(qScale);
908         outputTensorInfo.SetQuantizationOffset(qOffset);
909     }
910
911     auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
912
913     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
914         forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
915                          QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
916
917     return SimplePooling2dTestImpl<ArmnnType>(
918         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
919 }
920
921 //
// Tests average pooling with the following parameters:
923 //
924 //   Pooling size: 3x2
925 //   Stride:       (2,2)
926 //   input size:   3x2
927 //   channels:     1
928 //   batch size:   1
929 //
930 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
931 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
932         armnn::IWorkloadFactory& workloadFactory,
933         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
934         bool forceNoPadding,
935         float qScale = 1.0f,
936         int32_t qOffset = 0)
937 {
938     armnn::Pooling2dDescriptor descriptor;
939     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
940     descriptor.m_PoolWidth = 3;
941     descriptor.m_PoolHeight = 2;
942     descriptor.m_StrideX = 2;
943     descriptor.m_StrideY = 2;
944     descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
945     descriptor.m_PadRight = descriptor.m_PadLeft;
946     descriptor.m_PadTop = 0;
947     descriptor.m_PadBottom = 0;
948     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
949     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
950
951     unsigned int inputWidth = 3;
952     unsigned int inputHeight = 2;
953     unsigned int outputWidth =
954         (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
955         descriptor.m_StrideX;
956     unsigned int outputHeight =
957         (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
958         descriptor.m_StrideY;
959     unsigned int channels = 1;
960     unsigned int batchSize = 1;
961
962     std::vector<float> inputData = {
963         3.0f, 6.0f, 9.0f,
964         12.0f, 15.0f, 18.0f,
965     };
966
967     std::vector<float> expectedOutputDataWithPadding = {
968         6.0f, 8.0f,
969     };
970
971     std::vector<float> expectedOutputDataNoPadding = {
972         10.5f,
973     };
974
975     armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
976
977     // Scale and offset should match input - we're just calculating average values.
978     armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
979
980     // Set quantization parameters if the requested type is a quantized type.
981     if(armnn::IsQuantizedType<T>())
982     {
983         inputTensorInfo.SetQuantizationScale(qScale);
984         inputTensorInfo.SetQuantizationOffset(qOffset);
985         outputTensorInfo.SetQuantizationScale(qScale);
986         outputTensorInfo.SetQuantizationOffset(qOffset);
987     }
988
989     auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
990
991     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
992         forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
993                          QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
994
995     return SimplePooling2dTestImpl<ArmnnType>(
996         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
997 }
998
999
1000 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1001 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1002     armnn::IWorkloadFactory& workloadFactory,
1003     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1004     float qScale = 1.0f,
1005     int32_t qOffset = 0)
1006 {
1007     armnn::Pooling2dDescriptor descriptor;
1008     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1009     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1010     descriptor.m_StrideX = descriptor.m_StrideY = 2;
1011     descriptor.m_PadLeft = 1;
1012     descriptor.m_PadRight = 1;
1013     descriptor.m_PadTop = 1;
1014     descriptor.m_PadBottom = 1;
1015     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1016
1017     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1018     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1019
1020     // Set quantization parameters if the requested type is a quantized type.
1021     if(armnn::IsQuantizedType<T>())
1022     {
1023         inputTensorInfo.SetQuantizationScale(qScale);
1024         inputTensorInfo.SetQuantizationOffset(qOffset);
1025         outputTensorInfo.SetQuantizationScale(qScale);
1026         outputTensorInfo.SetQuantizationOffset(qOffset);
1027     }
1028
1029     auto input = MakeTensor<T, 4>(inputTensorInfo,
1030         QuantizedVector<T>({
1031             -1.0f, -2.0f,  3.0f,  4.0f,
1032             -1.0f, -2.0f,  3.0f,  4.0f,
1033              1.0f,  2.0f, -3.0f, -4.0f,
1034              1.0f,  2.0f, -3.0f, -4.0f,
1035         },
1036         qScale, qOffset));
1037
1038     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1039         QuantizedVector<T>({
1040             -1.0f,  3.0f,  4.0f,
1041              1.0f,  3.0f,  4.0f,
1042              1.0f,  2.0f, -4.0f,
1043         },
1044         qScale, qOffset));
1045
1046     return SimplePooling2dTestImpl<ArmnnType>(
1047         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1048 }
1049
1050 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1051 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1052     armnn::IWorkloadFactory& workloadFactory,
1053     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1054     float qScale = 1.0f,
1055     int32_t qOffset = 0)
1056 {
1057     armnn::Pooling2dDescriptor descriptor;
1058     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1059     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1060     descriptor.m_StrideX = descriptor.m_StrideY = 1;
1061     descriptor.m_PadLeft = 1;
1062     descriptor.m_PadRight = 1;
1063     descriptor.m_PadTop = 1;
1064     descriptor.m_PadBottom = 1;
1065     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1066
1067     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1068     armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1069
1070     // Set quantization parameters if the requested type is a quantized type.
1071     if(armnn::IsQuantizedType<T>())
1072     {
1073         inputTensorInfo.SetQuantizationScale(qScale);
1074         inputTensorInfo.SetQuantizationOffset(qOffset);
1075         outputTensorInfo.SetQuantizationScale(qScale);
1076         outputTensorInfo.SetQuantizationOffset(qOffset);
1077     }
1078
1079     auto input = MakeTensor<T, 4>(inputTensorInfo,
1080         QuantizedVector<T>({
1081             -1.0f, -2.0f,  3.0f,  4.0f,
1082             -1.0f, -2.0f,  3.0f,  4.0f,
1083              1.0f,  2.0f, -3.0f, -4.0f,
1084              1.0f,  2.0f, -3.0f, -4.0f,
1085         },
1086         qScale, qOffset));
1087
1088     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1089         QuantizedVector<T>({
1090             -1.0f,  3.0f,  4.0f,  4.0f,
1091              2.0f,  3.0f,  4.0f,  4.0f,
1092              2.0f,  3.0f,  4.0f,  4.0f,
1093              2.0f,  2.0f,  2.0f, -3.0f,
1094         },
1095         qScale, qOffset));
1096
1097     return SimplePooling2dTestImpl<ArmnnType>(
1098         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1099 }
1100
1101 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1102 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1103     armnn::IWorkloadFactory& workloadFactory,
1104     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1105     float qScale = 1.0f,
1106     int32_t qOffset = 0)
1107 {
1108     armnn::Pooling2dDescriptor descriptor;
1109     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1110     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1111     descriptor.m_StrideX = descriptor.m_StrideY = 2;
1112     descriptor.m_PadLeft = 1;
1113     descriptor.m_PadRight = 1;
1114     descriptor.m_PadTop = 1;
1115     descriptor.m_PadBottom = 1;
1116     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1117
1118     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1119     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1120
1121     // Set quantization parameters if the requested type is a quantized type.
1122     if(armnn::IsQuantizedType<T>())
1123     {
1124         inputTensorInfo.SetQuantizationScale(qScale);
1125         inputTensorInfo.SetQuantizationOffset(qOffset);
1126         outputTensorInfo.SetQuantizationScale(qScale);
1127         outputTensorInfo.SetQuantizationOffset(qOffset);
1128     }
1129
1130     auto input = MakeTensor<T, 4>(inputTensorInfo,
1131         QuantizedVector<T>({
1132             12.0f, 20.0f, 32.0f, 40.0f,
1133             12.0f, 20.0f, 32.0f, 40.0f,
1134             12.0f, 20.0f, 32.0f, 40.0f,
1135             12.0f, 20.0f, 32.0f, 40.0f,
1136         },
1137         qScale, qOffset));
1138
1139     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1140         QuantizedVector<T>({
1141             3.0f,  13.0f,  10.0f,
1142             6.0f,  26.0f,  20.0f,
1143             3.0f,  13.0f,  10.0f,
1144         },
1145         qScale, qOffset));
1146
1147     return SimplePooling2dTestImpl<ArmnnType>(
1148         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1149 }
1150
1151 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1152 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1153     armnn::IWorkloadFactory& workloadFactory,
1154     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1155     float qScale = 1.0f,
1156     int32_t qOffset = 0)
1157 {
1158     armnn::Pooling2dDescriptor descriptor;
1159     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1160     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1161     descriptor.m_StrideX = descriptor.m_StrideY = 2;
1162     descriptor.m_PadLeft = 0;
1163     descriptor.m_PadRight = 0;
1164     descriptor.m_PadTop = 0;
1165     descriptor.m_PadBottom = 0;
1166     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1167     descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
1168
1169     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1170     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
1171
1172     // Set quantization parameters if the requested type is a quantized type.
1173     if(armnn::IsQuantizedType<T>())
1174     {
1175         inputTensorInfo.SetQuantizationScale(qScale);
1176         inputTensorInfo.SetQuantizationOffset(qOffset);
1177         outputTensorInfo.SetQuantizationScale(qScale);
1178         outputTensorInfo.SetQuantizationOffset(qOffset);
1179     }
1180
1181     auto input = MakeTensor<T, 4>(inputTensorInfo,
1182         QuantizedVector<T>({
1183             1.0f, 2.0f, 3.0f, 4.0f,
1184             1.0f, 2.0f, 3.0f, 4.0f,
1185             1.0f, 2.0f, 3.0f, 4.0f,
1186             1.0f, 2.0f, 3.0f, 4.0f,
1187         },
1188         qScale, qOffset));
1189
1190     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1191         QuantizedVector<T>({
1192             2.0f, 3.5f,
1193             2.0f, 3.5f
1194         },
1195         qScale, qOffset));
1196
1197     return SimplePooling2dTestImpl<ArmnnType>(
1198         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1199 }
1200
1201 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1202 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1203     armnn::IWorkloadFactory& workloadFactory,
1204     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1205     float qScale = 1.0f,
1206     int32_t qOffset = 0)
1207 {
1208     armnn::Pooling2dDescriptor descriptor;
1209     descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1210     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1211     descriptor.m_StrideX = descriptor.m_StrideY = 1;
1212     descriptor.m_PadLeft = 1;
1213     descriptor.m_PadRight = 1;
1214     descriptor.m_PadTop = 1;
1215     descriptor.m_PadBottom = 1;
1216     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1217
1218     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1219     armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1220
1221     // Set quantization parameters if the requested type is a quantized type.
1222     if(armnn::IsQuantizedType<T>())
1223     {
1224         inputTensorInfo.SetQuantizationScale(qScale);
1225         inputTensorInfo.SetQuantizationOffset(qOffset);
1226         outputTensorInfo.SetQuantizationScale(qScale);
1227         outputTensorInfo.SetQuantizationOffset(qOffset);
1228     }
1229
1230     auto input = MakeTensor<T, 4>(inputTensorInfo,
1231         QuantizedVector<T>({
1232             9.0f,   27.0f,  18.0f,  36.0f,
1233             18.0f,   9.0f,  18.0f,   9.0f,
1234             27.0f,  18.0f,   9.0f,  27.0f,
1235             9.0f,   27.0f,   9.0f,  18.0f,
1236         },
1237         qScale, qOffset));
1238
1239     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1240         QuantizedVector<T>({
1241              7.0f,  11.0f,  13.0f, 9.0f,
1242             12.0f,  17.0f,  19.0f, 13.0f,
1243             12.0f,  16.0f,  16.0f, 10.0f,
1244              9.0f,  11.0f,  12.0f, 7.0f,
1245         },
1246         qScale, qOffset));
1247
1248     return SimplePooling2dTestImpl<ArmnnType>(
1249         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1250 }
1251
1252 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1253 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1254     armnn::IWorkloadFactory& workloadFactory,
1255     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1256     float qScale = 1.0f,
1257     int32_t qOffset = 0)
1258 {
1259     armnn::Pooling2dDescriptor descriptor;
1260     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1261     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1262     descriptor.m_StrideX = descriptor.m_StrideY = 2;
1263     descriptor.m_PadLeft = 1;
1264     descriptor.m_PadRight = 1;
1265     descriptor.m_PadTop = 1;
1266     descriptor.m_PadBottom = 1;
1267     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1268
1269     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1270     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1271
1272     // Set quantization parameters if the requested type is a quantized type.
1273     if(armnn::IsQuantizedType<T>())
1274     {
1275         inputTensorInfo.SetQuantizationScale(qScale);
1276         inputTensorInfo.SetQuantizationOffset(qOffset);
1277         outputTensorInfo.SetQuantizationScale(qScale);
1278         outputTensorInfo.SetQuantizationOffset(qOffset);
1279     }
1280
1281     auto input = MakeTensor<T, 4>(inputTensorInfo,
1282         QuantizedVector<T>({
1283             2.0f,  4.0f, 8.0f, 16.0f,
1284             4.0f,  2.0f, 2.0f, 4.0f,
1285             8.0f,  2.0f, 4.0f, 2.0f,
1286             16.0f, 2.0f, 2.0f, 8.0f,
1287         },
1288         qScale, qOffset));
1289
1290     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1291         QuantizedVector<T>({
1292                1.0f,     4.4721f,   8.0f,
1293             4.4721f,     2.6457f,   2.236f,
1294                8.0f,     1.4142f,   4.0f,
1295         },
1296         qScale, qOffset));
1297
1298     return SimplePooling2dTestImpl<ArmnnType>(
1299         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1300 }
1301
1302 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1303 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1304     armnn::IWorkloadFactory& workloadFactory,
1305     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1306     float qScale = 1.0f,
1307     int32_t qOffset = 0)
1308 {
1309     armnn::Pooling2dDescriptor descriptor;
1310     descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1311     descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1312     descriptor.m_StrideX = descriptor.m_StrideY = 1;
1313     descriptor.m_PadLeft = 1;
1314     descriptor.m_PadRight = 1;
1315     descriptor.m_PadTop = 1;
1316     descriptor.m_PadBottom = 1;
1317     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1318
1319     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1320     armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1321
1322     // Set quantization parameters if the requested type is a quantized type.
1323     if(armnn::IsQuantizedType<T>())
1324     {
1325         inputTensorInfo.SetQuantizationScale(qScale);
1326         inputTensorInfo.SetQuantizationOffset(qOffset);
1327         outputTensorInfo.SetQuantizationScale(qScale);
1328         outputTensorInfo.SetQuantizationOffset(qOffset);
1329     }
1330
1331     auto input = MakeTensor<T, 4>(inputTensorInfo,
1332         QuantizedVector<T>({
1333             1.0f, 2.0f, 3.0f, 4.0f,
1334             1.0f, 2.0f, 3.0f, 4.0f,
1335             1.0f, 2.0f, 3.0f, 4.0f,
1336             1.0f, 2.0f, 3.0f, 4.0f,
1337         },
1338         qScale, qOffset));
1339
1340     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1341         QuantizedVector<T>({
1342             1.0540f, 1.7638f, 2.5385f, 2.3570f,
1343             1.2909f, 2.1602f, 3.1091f, 2.8867f,
1344             1.2909f, 2.1602f, 3.1091f, 2.8867f,
1345             1.0540f, 1.7638f, 2.5385f, 2.3570f,
1346         },
1347         qScale, qOffset));
1348
1349     return SimplePooling2dTestImpl<ArmnnType>(
1350         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1351 }
1352
1353 } // anonymous namespace
1354
// Float32 variant of the 2x2 pool / 2x2 stride max pooling test.
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
1363
1364 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
1365     armnn::IWorkloadFactory& workloadFactory,
1366     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1367     bool forceNoPadding)
1368 {
1369     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
1370         workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
1371 }
1372
// QSymm16 variant of the 2x2 pool / 2x2 stride max pooling test
// (uses the common implementation's default quantization parameters).
LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
            workloadFactory, memoryManager, forceNoPadding);
}
1381
// Float32 variant of the 3x3 pool / 2x4 stride max pooling test.
LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
1390
1391 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
1392     armnn::IWorkloadFactory& workloadFactory,
1393     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1394     bool forceNoPadding)
1395 {
1396     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
1397         workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
1398 }
1399
// QSymm16 variant of the 3x3 pool / 2x4 stride max pooling test
// (uses the common implementation's default quantization parameters).
LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
            workloadFactory, memoryManager, forceNoPadding);
}
1408
// Float32 simple max pooling test, parameterized on data layout (NCHW/NHWC).
LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
1416
// QAsymm8 simple max pooling test, parameterized on data layout
// (uses the common implementation's default quantization parameters).
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
1424
// QSymm16 simple max pooling test, parameterized on data layout.
LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
}
// Float32 variant of the padded (IgnoreValue) 2x2 max pooling test.
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
1438
1439 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
1440     armnn::IWorkloadFactory& workloadFactory,
1441     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1442 {
1443     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
1444             workloadFactory, memoryManager, 1.0f, -5);
1445 }
1446
// QSymm16 variant of the padded (IgnoreValue) 2x2 max pooling test
// (uses the common implementation's default quantization parameters).
LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
            workloadFactory, memoryManager);
}
1454
// Float32 variant of the padded (IgnoreValue) 3x3 max pooling test.
LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
1461
1462 LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
1463     armnn::IWorkloadFactory& workloadFactory,
1464     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1465 {
1466     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
1467             workloadFactory, memoryManager, 1.0f, -5);
1468 }
1469
// QSymm16 variant of the padded (IgnoreValue) 3x3 max pooling test
// (uses the common implementation's default quantization parameters).
LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
            workloadFactory, memoryManager);
}
1477
1478 LayerTestResult<float, 4> SimpleAveragePooling2dTest(
1479     armnn::IWorkloadFactory& workloadFactory,
1480     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1481     const armnn::DataLayout dataLayout)
1482 {
1483     return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1484 }
1485
1486 LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
1487     armnn::IWorkloadFactory& workloadFactory,
1488     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1489     const armnn::DataLayout dataLayout)
1490 {
1491     return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
1492         workloadFactory, memoryManager, dataLayout, 0.5, -1);
1493 }
1494
1495 LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
1496     armnn::IWorkloadFactory& workloadFactory,
1497     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1498     const armnn::DataLayout dataLayout)
1499 {
1500     return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
1501             workloadFactory, memoryManager, dataLayout);
1502 }
1503
// Float32 average-pooling test with a 3x2 pool and 2x2 stride; forceNoPadding
// is forwarded to the common implementation to toggle the padding behaviour
// under test.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
1512
1513 LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
1514     armnn::IWorkloadFactory& workloadFactory,
1515     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1516 {
1517     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1518 }
1519
1520 LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
1521     armnn::IWorkloadFactory& workloadFactory,
1522     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1523 {
1524     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
1525         workloadFactory, memoryManager, 0.5, -1);
1526 }
1527
1528 LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
1529     armnn::IWorkloadFactory& workloadFactory,
1530     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1531 {
1532     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
1533             workloadFactory, memoryManager);
1534 }
// Float32 variant of the ignore-padding simple average-pooling test.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
            workloadFactory, memoryManager);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
            workloadFactory, memoryManager);
}
1557
// Float32 variant of the simple average-pooling test with no padding applied.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
            workloadFactory, memoryManager);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
            workloadFactory, memoryManager);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
            workloadFactory, memoryManager);
}
1581
// Float32 variant of the size-3 average-pooling test with explicit (ignored) padding.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
            workloadFactory, memoryManager);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
            workloadFactory, memoryManager);
}
1604
// Float32 variant of the simple L2 pooling test, parameterised on data layout.
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
}
1628
// Float32 variant of the L2 pooling test with pool size 3 and stride 1.
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
1649
1650 LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
1651     armnn::IWorkloadFactory& workloadFactory,
1652     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1653 {
1654     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1655 }
1656
1657 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
1658     armnn::IWorkloadFactory& workloadFactory,
1659     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1660 {
1661     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1662 }
1663
1664 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
1665     armnn::IWorkloadFactory& workloadFactory,
1666     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1667 {
1668     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
1669 }
// Float32 variant of the L2 pooling test with pool size 3 and stride 4.
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
1690
// Float32 variant of the L2 pooling test with pool size 7.
LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
1711
1712 LayerTestResult<float, 4> L2Pooling2dSize9Test(
1713     armnn::IWorkloadFactory& workloadFactory,
1714     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1715 {
1716     return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1717 }
1718
1719 LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
1720     armnn::IWorkloadFactory& workloadFactory,
1721     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1722 {
1723     return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
1724 }
1725
1726 LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
1727     armnn::IWorkloadFactory& workloadFactory,
1728     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1729 {
1730     return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
1731 }
// Float32 variant of the ignore-padding simple L2 pooling test.
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
1752
// Float32 variant of the size-3 L2 pooling test with explicit (ignored) padding.
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
1773
// Float32 variant of the pooling test with asymmetric, non-square parameters.
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Uint8 variant: QuantisedAsymm8, without explicit quantisation parameters.
LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
1794
// Compares the given pooling algorithm run on workloadFactory against the
// same workload run on refWorkloadFactory (Float32).
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm  poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}

// Uint8 variant: QuantisedAsymm8 with quantisation scale 0.1f and offset 128.
LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm  poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}

// Int16 variant: QuantisedSymm16, without explicit quantisation parameters.
LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm  poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
            workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}