Fix various uses of non-standard C++:
[platform/upstream/armnn.git] src/backends/backendsCommon/test/LayerTests.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
#include <ResolveType.hpp>

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <reference/workloads/RefWorkloads.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "GatherTestImpl.hpp"
#include "SpaceToBatchNdTestImpl.hpp"
#include "SpaceToDepthTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "StridedSliceTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"
#include "DebugTestImpl.hpp"
#include "DequantizeTestImpl.hpp"
#include "QuantizeTestImpl.hpp"
#include "TransposeConvolution2dTestImpl.hpp"

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

struct Simple3dSoftmaxOutputData
{
    const std::vector<float> outputData =
            {
                0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
                0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
            };

    const armnn::TensorShape inputShape{ 1, 8, 1 };

    const std::vector<float> inputData =
            {
                    0.f, 1.f, 0.f, 0.f,
                    .5f, 0.f, 0.f, 0.f,
            };
};

struct Simple4dSoftmaxData
{
    const armnn::TensorShape inputShape{ 1, 8, 1, 1 };

    const std::vector<float> outputData = { 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
                                            0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f };
    const std::vector<float> inputData =
            {
                    0.f, 1.f, 0.f, 0.f,
                    .5f, 0.f, 0.f, 0.f
            };
};
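
// Sanity check for the expected softmax outputs above: softmax(x)_i = exp(x_i) / sum_j exp(x_j).
// For the input {0, 1, 0, 0, 0.5, 0, 0, 0} the denominator is 6*exp(0) + exp(1) + exp(0.5)
// ~= 6 + 2.71828 + 1.64872 = 10.36700, so exp(0)/sum ~= 0.0964599, exp(1)/sum ~= 0.26220518
// and exp(0.5)/sum ~= 0.15903549, matching outputData.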

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
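
// Note: callers pass qScale * qScale as the bias scale. This matches the usual convention
// for quantized convolution, where the bias is quantized with
// biasScale = inputScale * weightScale (both equal to qScale in these tests).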

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
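    // Sanity check for the first expected value: for output channel 0 the kernel is all
    // 1s on input channel 0 (with a single -1), all 0s on channel 1 and all 2s on
    // channel 2. In the top-left 3x5 window, input channel 0 contributes
    // 0.5 * (3 + 3 + 3 + 3) = 6 (its row of 0s contributes nothing), channel 1
    // contributes 0, and channel 2 (all -1s) contributes 15 * 2 * -1 = -30: 6 - 30 = -24.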
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel 4x3 image (NHWC).
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                       1, 5, 2, 3,
                                                       8, 7, 3, 6,
                                                       3, 3, 9, 1
                                                       });

    // Use 1 batch of a 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                    4, 5, 6,
                                                                    0, 0, 0,
                                                                    3, 2, 1
                                                                    });

    // Expected output is 1 batch of a 1-channel 4x3 image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
            {
                    23, 41, 33, 21,
                    44, 65, 76, 52,
                    82, 85, 79, 42
            };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        float qScale,
        int32_t qOffset,
        bool biasEnabled,
        const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
            {
                1, 5, 2, 3, 5,
                8, 7, 3, 6, 3,
                3, 3, 9, 1, 9,
                4, 1, 8, 1, 3,
                6, 8, 1, 9, 2
            });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
            {
                4, 5, 6,
                0, 0, 0,
                3, 2, 1
            });

    // Expected output is a single-batch, 1 channel, 3x3 image.
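    // Sanity check for the top-left expected value: with padLeft = padTop = 1 and stride 2,
    // the kernel row (4, 5, 6) falls on the zero padding and the middle row is all 0s, so
    // only (3, 2, 1) contributes. It samples input row (8, 7, ...) at columns -1, 0, 1,
    // i.e. (pad, 8, 7): 3*0 + 2*8 + 1*7 = 23.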
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
            {
                23, 33, 24,
                91, 99, 48,
                26, 50, 19
            };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX  = 2;
    uint32_t strideY  = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        biasEnabled,
        armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        biasEnabled,
        layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
            workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[..... .....  ..... .....  ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,    0,      0,    0,    0,    0,
            -242,  -594,  -934, -372,    0,    0,
            -495, -1190, -1850, -725,    0,    0,
            -538, -1256, -1916, -748,    0,    0,
            -273, -626,  -946,  -363,    0,    0,
               0,    0,     0,     0,    0,    0,
               0,    0,     0,     0,    0,    0,
               0,    0,     0,     0,    0,    0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,41,51,
            12,22,32,42,52,
            13,23,33,43,53,
            14,24,34,44,54,
            15,25,35,45,55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,-31,-41,
            -12,-22,-32,-42,
            -13,-23,-33,-43,
            -14,-24,-34,-44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940,  -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144,  -9318, -5152,
            -5032,  -7256,  -9376,  -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2); // Padding bottom.
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout layout)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout layout)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
            <armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
            workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
}

LayerTestResult<float,4> CompareConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
            workloadFactory, memoryManager, refWorkloadFactory);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX  = 1,
    uint32_t strideY  = 1,
    bool biasEnabled = false
)
{
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                    kernelTensorInfo.GetQuantizationOffset(),
                                                                    kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            input,
            kernel,
            GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
            expectedOutput,
            qScale,
            qOffset,
            layout,
            padLeft,
            padTop,
            padRight,
            padBottom,
            strideX,
            strideY,
            dilationX,
            dilationY);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I - K + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };
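
    // Sanity check: for output (0, 0) the dilated kernel taps sample input rows and
    // columns {0, 3, 6}. Only the tap at (3, 6) lands on the block of 1s, so the result
    // is that tap's weight, 6; for output (0, 1) the columns shift to {1, 4, 7} and only
    // (3, 4) hits the block, giving 5.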

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,
            3,
            layout,
            0,  // padLeft
            0,  // padTop
            0,  // padRight
            0,  // padBottom
            1,  // strideX
            1,  // strideY
            biasEnabled);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I - K + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
         6.,  4.,  4.,  4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,
            3,
            layout,
            0,  // padLeft
            0,  // padTop
            0,  // padRight
            0,  // padBottom
            1,  // strideX
            1,  // strideY
            biasEnabled);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
        armnn::IWorkloadFactory &workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2,
        3, 4
    };

    // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
    // therefore the output will be 4x4: trunc((I - K + 2P)/S) + 1 => trunc((10 - 3 + 2x1)/3) + 1 = 4,
    // where dilation size = d = 2; dilated kernel size = K = 3; input size = I = 10;
    // per-side padding = P = 1 (set below); stride = S = 3.
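    // Sanity check for the top-left expected value: with padding 1 and stride 3 the
    // dilated taps for output (0, 0) land at input rows/columns {-1, 1}; only the
    // (1, 1) tap falls inside the all-1s image, so the result is its weight, 4.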
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        4,  7,  7, 3,
        6, 10, 10, 4,
        6, 10, 10, 4,
        2,  3,  3, 1
    };
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            2,
            2,
            layout,
            padLeft,
            padTop,
            padRight,
            padBottom,
            3,
            3,
            biasEnabled);
}

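// Explicit instantiations of the templates above for each data type / bias type
// combination used by the backend unit tests, so their definitions are compiled
// into this translation unit.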
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the Python TensorFlow library with strideX=1, strideY=1.
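    // Sanity check for the first expected value (channel 0, top-left), using the first
    // 4x4 kernel block: with padLeft = padTop = 1 the kernel row (32, 31, 30, 29) and the
    // leftmost kernel column fall on the zero padding, so the contribution is
    // 27*0 + 26*1 + 25*2 + 23*5 + 22*6 + 21*7 + 19*10 + 18*11 + 17*12 = 1062.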
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
             32, 31, 30, 29,
             28, 27, 26, 25,
             24, 23, 22, 21,
             20, 19, 18, 17,

             16, 15, 14, 13,
             12, 11, 10,  9,
              8,  7,  6,  5,
              4,  3,  2,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1);  // strideY
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
             1, 2, 3,
             4, 5, 6,
             7, 8, 9
        })));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;
    uint32_t dilationX  = 3;
    uint32_t dilationY  = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
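    // Each of the nine output positions covers the block of 1s with exactly one dilated
    // tap (the centre tap, weight 5), so every output element equals 5.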
1210     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1211     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
1212         QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1213         {
1214              5, 5, 5,
1215              5, 5, 5,
1216              5, 5, 5
1217         })));
1218
1219     return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1220         workloadFactory,
1221         memoryManager,
1222         input,
1223         kernel,
1224         GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1225         expectedOutput,
1226         qScale,
1227         qOffset,
1228         layout,
1229         padLeft,
1230         padTop,
1231         padRight,
1232         padBottom,
1233         strideX,
1234         strideY,
1235         dilationX,
1236         dilationY);
1237 }
1238
1239
1240 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1241 LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
1242         armnn::IWorkloadFactory& workloadFactory,
1243         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1244         const std::vector<float>& inputNoQuantizedValues,
1245         armnn::TensorInfo& inputTensorInfo,
1246         const std::vector<float>& kernelNoQuantizedValues,
1247         armnn::TensorInfo& kernelTensorInfo,
1248         const std::vector<float>& outputExpectedNoQuantizedValues,
1249         armnn::TensorInfo& outputTensorInfo,
1250         uint32_t dilationX,
1251         uint32_t dilationY,
1252         armnn::DataLayout layout = armnn::DataLayout::NCHW,
1253         bool biasEnabled = false)
1254 {
1255     float qScale;
1256     int32_t qOffset;
1257     switch (ArmnnType)
1258     {
1259         case armnn::DataType::QuantisedAsymm8:
1260         {
1261             qScale = 0.1f;
1262             qOffset = 128;
1263             break;
1264         }
1265         case armnn::DataType::QuantisedSymm16:
1266         {
1267             qScale = 0.1f;
1268             qOffset = 0;
1269             break;
1270         }
1271         case armnn::DataType::Float32:
1272         default:
1273         {
1274             qScale = 0.f;
1275             qOffset = 0;
1276             break;
1277         }
1278     }
1279
1280     inputTensorInfo.SetQuantizationScale(qScale);
1281     inputTensorInfo.SetQuantizationOffset(qOffset);
1282     kernelTensorInfo.SetQuantizationScale(qScale);
1283     kernelTensorInfo.SetQuantizationOffset(qOffset);
1284     outputTensorInfo.SetQuantizationScale(qScale);
1285     outputTensorInfo.SetQuantizationOffset(qOffset);
1286
1287     auto input = MakeTensor<T, 4>(inputTensorInfo,
1288                                   std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
1289                                                                     inputTensorInfo.GetQuantizationOffset(),
1290                                                                     inputNoQuantizedValues)));
1291     auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1292                                    std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
1293                                                                      kernelTensorInfo.GetQuantizationOffset(),
1294                                                                      kernelNoQuantizedValues)));
1295     auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
1296                                            std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
1297                                                                              outputTensorInfo.GetQuantizationOffset(),
1298                                                                              outputExpectedNoQuantizedValues)));
1299
1300     uint32_t padLeft = 0;
1301     uint32_t padTop = 0;
1302     uint32_t padRight = 0;
1303     uint32_t padBottom = 0;
1304     uint32_t strideX  = 1;
1305     uint32_t strideY  = 1;
1306
1307     return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1308             workloadFactory,
1309             memoryManager,
1310             input,
1311             kernel,
1312             GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1313             expectedOutput,
1314             qScale,
1315             qOffset,
1316             layout,
1317             padLeft,
1318             padTop,
1319             padRight,
1320             padBottom,
1321             strideX,
1322             strideY,
1323             dilationX,
1324             dilationY);
1325 }
1326
1327 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1328 LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
1329         armnn::IWorkloadFactory& workloadFactory,
1330         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1331         bool biasEnabled,
1332         const armnn::DataLayout layout)
1333 {
1334     armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
1335     std::vector<float> inputNoQuantizedValues =
1336             {
1337                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1338                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1339                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1340                     0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1341                     0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1342                     0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1343                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1344                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1345                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1346                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1347             };
1348
1349     armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1350     std::vector<float> kernelNoQuantizedValues =
1351             {
1352                     1, 2, 3,
1353                     4, 5, 6,
1354                     7, 8, 9
1355             };
1356
1357     // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1358     // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1359     armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1360     std::vector<float> outputExpectedNoQuantizedValues =
1361             {
1362                     6., 5., 5., 5.,
1363                     6., 5., 5., 5.,
1364                     6., 5., 5., 5.,
1365                     3., 2., 2., 2.
1366             };
1367
1368     return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1369             workloadFactory,
1370             memoryManager,
1371             inputNoQuantizedValues,
1372             inputTensorInfo,
1373             kernelNoQuantizedValues,
1374             kernelTensorInfo,
1375             outputExpectedNoQuantizedValues,
1376             outputTensorInfo,
1377             3,
1378             3,
1379             layout,
1380             biasEnabled);
1381 }
1382
1383 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1384 LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
1385         armnn::IWorkloadFactory& workloadFactory,
1386         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1387         bool biasEnabled,
1388         const armnn::DataLayout layout)
1389 {
1390     armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
1391     std::vector<float> inputNoQuantizedValues =
1392             {
1393                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1394                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1395                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1396                     0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1397                     0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1398                     0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1399                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1400                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1401                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1402                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1403
1404                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1405                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1406                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1407                     0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1408                     0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1409                     0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1410                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1411                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1412                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1413                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1414             };
1415
1416     armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
1417     std::vector<float> kernelNoQuantizedValues =
1418             {
1419                     1, 2, 3,
1420                     4, 5, 6,
1421                     7, 8, 9,
1422
1423                     1, 2, 3,
1424                     4, 5, 6,
1425                     7, 8, 9
1426             };
1427
1428     // Since the dilation rate is 3, the kernel is effectively dilated to 7x7;
1429     // the output is therefore 2x4x4: (I - K + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4 per channel.
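         // Each input channel is convolved independently with its own (identical) 3x3 kernel,
         // so both 4x4 output channels repeat the single-channel result of the test above.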
1430     armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
1431     std::vector<float> outputExpectedNoQuantizedValues =
1432             {
1433                     6., 5., 5., 5.,
1434                     6., 5., 5., 5.,
1435                     6., 5., 5., 5.,
1436                     3., 2., 2., 2.,
1437
1438                     6., 5., 5., 5.,
1439                     6., 5., 5., 5.,
1440                     6., 5., 5., 5.,
1441                     3., 2., 2., 2.
1442             };
1443
1444     return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1445             workloadFactory,
1446             memoryManager,
1447             inputNoQuantizedValues,
1448             inputTensorInfo,
1449             kernelNoQuantizedValues,
1450             kernelTensorInfo,
1451             outputExpectedNoQuantizedValues,
1452             outputTensorInfo,
1453             3,
1454             3,
1455             layout,
1456             biasEnabled);
1457 }
1458
1459
1460 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
1461 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
1462         armnn::IWorkloadFactory&,
1463         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1464         bool,
1465         armnn::DataLayout);
1466
1467 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
1468 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1469         armnn::IWorkloadFactory&,
1470         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1471         bool,
1472         armnn::DataLayout);
1473
1474 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
1475 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1476         armnn::IWorkloadFactory&,
1477         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1478         bool,
1479         armnn::DataLayout);
1480
1481 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
1482 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
1483         armnn::IWorkloadFactory&,
1484         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1485         bool,
1486         armnn::DataLayout);
1487
1488 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
1489 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1490         armnn::IWorkloadFactory&,
1491         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1492         bool,
1493         armnn::DataLayout);
1494
1495 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
1496 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1497         armnn::IWorkloadFactory&,
1498         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1499         bool,
1500         armnn::DataLayout);
1501
1502 LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1503     armnn::IWorkloadFactory& workloadFactory,
1504     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1505     bool biasEnabled,
1506     const armnn::DataLayout layout)
1507 {
1508     return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1509         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
1510 }
1511
1512 LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1513     armnn::IWorkloadFactory& workloadFactory,
1514     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1515     bool biasEnabled)
1516 {
1517     return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1518         workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
1519 }
1520
1521 LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1522     armnn::IWorkloadFactory& workloadFactory,
1523     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1524     bool biasEnabled,
1525     const armnn::DataLayout layout)
1526 {
1527     return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1528         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
1529 }
1530
1531 LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
1532     armnn::IWorkloadFactory& workloadFactory,
1533     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1534 {
1535     armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
1536     auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
1537
1538     std::vector<float> kernelData;
1539     std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
1540     for (unsigned int i = 0; i < 64; ++i)
1541     {
1542         kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
1543     }
1544     armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
1545     auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
1546
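         // Every output is expected to be zero: each depth-multiplier channel computes the
         // same dot product, 1*1 + (-1)*2 + (-1)*3 + 1*4 = 0.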
1547     std::vector<float> expectedOutputData(64, 0.f);
1548     armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
1549     auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
1550
1551     return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1552             workloadFactory,
1553             memoryManager,
1554             input,
1555             kernel,
1556             boost::multi_array<float, 1>(),
1557             expectedOutput,
1558             0.f,
1559             0,
1560             armnn::DataLayout::NCHW);
1561 }
1562
1563 LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1564     armnn::IWorkloadFactory& workloadFactory,
1565     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1566     bool biasEnabled,
1567     const armnn::DataLayout layout)
1568 {
1569     return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1570         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
1571 }
1572
1573 LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1574     armnn::IWorkloadFactory& workloadFactory,
1575     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1576     bool biasEnabled,
1577     const armnn::DataLayout layout)
1578 {
1579     return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1580         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1581 }
1582
1583 LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1584     armnn::IWorkloadFactory& workloadFactory,
1585     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1586     bool biasEnabled,
1587     const armnn::DataLayout layout)
1588 {
1589     return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1590         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1591 }
1592
1593 LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1594         armnn::IWorkloadFactory& workloadFactory,
1595         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1596 {
1597     return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1598             workloadFactory,
1599             memoryManager,
1600             0.f,
1601             0,
1602             false);
1603 }
1604
1605 LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1606         armnn::IWorkloadFactory& workloadFactory,
1607         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1608         bool biasEnabled,
1609         const armnn::DataLayout layout)
1610 {
1611     return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1612         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1613 }
1614
1615 LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1616                 armnn::IWorkloadFactory& workloadFactory,
1617                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1618                 bool biasEnabled,
1619                 const armnn::DataLayout layout)
1620 {
1621     return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1622         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1623 }
1624
1625 LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
1626     armnn::IWorkloadFactory& workloadFactory,
1627     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1628     armnn::IWorkloadFactory& refWorkloadFactory,
1629     const armnn::DataLayout layout)
1630 {
1631     return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1632         workloadFactory, memoryManager, refWorkloadFactory, layout);
1633 }
1634
1635 LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1636     armnn::IWorkloadFactory& workloadFactory,
1637     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1638     armnn::IWorkloadFactory& refWorkloadFactory,
1639     const armnn::DataLayout layout)
1640 {
1641     return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1642         workloadFactory, memoryManager, refWorkloadFactory, layout);
1643 }
1644
1645 LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1646     armnn::IWorkloadFactory& workloadFactory,
1647     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1648 {
1649     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1650     auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
1651     return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
1652 }
1653
1654 LayerTestResult<float,4> SimpleNormalizationWithinTest(
1655     armnn::IWorkloadFactory& workloadFactory,
1656     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1657 {
1658     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1659     auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
1660     return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
1661 }
1662
1663 LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1664     armnn::IWorkloadFactory& workloadFactory,
1665     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1666 {
1667     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1668     auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
1669     return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
1670 }
1671
1672 LayerTestResult<float,2> SimpleSoftmaxTest(
1673     armnn::IWorkloadFactory& workloadFactory,
1674     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1675     float beta)
1676 {
1677     return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1678 }
1679
1680 LayerTestResult<float,2> SimpleAxisSoftmaxTest(
1681         armnn::IWorkloadFactory& workloadFactory,
1682         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1683         float beta,
1684         int axis)
1685 {
1686     return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
1687 }
1688
1689 LayerTestResult<float,3> Simple3dSoftmaxTest(
1690         armnn::IWorkloadFactory& workloadFactory,
1691         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1692         float beta)
1693 {
1694     Simple3dSoftmaxOutputData data;
1695     return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1696                                                              data.inputShape, data.outputData, data.inputData);
1697 }
1698
1699 LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
1700         armnn::IWorkloadFactory& workloadFactory,
1701         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1702         float beta,
1703         int axis)
1704 {
1705     armnn::TensorShape inputShape;
1706     std::vector<float> inputData;
1707     std::vector<float> outputData;
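         // Negative axis values count back from the last dimension, so for this rank-3 input
         // axis -3 is equivalent to 0, -2 to 1 and -1 to 2. The expected values below are
         // softmax along the chosen axis, exp(beta * x_i) / sum_j exp(beta * x_j); with
         // beta = 1, exp(17) / (exp(17) + exp(16) + exp(15) + exp(14) + exp(1)) ~= 0.643914.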
1708     switch (axis)
1709     {
1710     case -3:
1711     case 0:
1712         {
1713             inputShape = {5, 2, 2};
1714
1715             inputData =
1716                     {
1717                             17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1718
1719                             15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1720                     };
1721
1722             outputData =
1723                     {
1724                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1725                             0.236882800924671f,
1726                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1727                             0.087144312427294f,
1728
1729                             0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1730                             0.032058600957022f,
1731                             0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1732                             7.246299848982885e-08f
1733                     };
1734             break;
1735         }
1736     case -2:
1737     case 1:
1738         {
1739             inputShape = {2, 5, 2};
1740
1741             inputData =
1742                     {
1743                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1744
1745                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1746                     };
1747
1748             outputData =
1749                     {
1750                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1751                             0.087144312427294f,
1752                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1753                             7.246299848982885e-08f,
1754
1755                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1756                             0.087144312427294f,
1757                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1758                             7.246299848982885e-08f
1759                     };
1760             break;
1761         }
1762     case -1:
1763     case 2:
1764         {
1765             inputShape = {2, 2, 5};
1766
1767             inputData =
1768                     {
1769                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1770                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1771                     };
1772
1773             outputData =
1774                     {
1775                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1776                             7.246299848982885e-08f,
1777                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1778                             7.246299848982885e-08f,
1779
1780                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1781                             7.246299848982885e-08f,
1782                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1783                             7.246299848982885e-08f
1784                     };
1785             break;
1786         }
1787     }
1788
1789     return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1790                                                              inputShape, outputData, inputData, axis);
1791 }
1792
1793 LayerTestResult<float,4> Simple4dSoftmaxTest(
1794         armnn::IWorkloadFactory& workloadFactory,
1795         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1796         float beta)
1797 {
1798     Simple4dSoftmaxData data;
1799     return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
1800                                                              data.outputData, data.inputData);
1801 }
1802
1803 LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
1804         armnn::IWorkloadFactory& workloadFactory,
1805         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1806         float beta,
1807         int axis)
1808 {
1809     armnn::TensorShape inputShape;
1810     std::vector<float> inputData;
1811     std::vector<float> outputData;
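         // As in the rank-3 test, negative axis values count back from the last dimension:
         // for this rank-4 input, axis -4 is equivalent to 0, -3 to 1, -2 to 2 and -1 to 3.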
1812     switch (axis)
1813     {
1814     case -4:
1815     case 0:
1816         {
1817             inputShape = {5, 2, 2, 2};
1818
1819             inputData =
1820                     {
1821                             17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
1822                             16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
1823                             15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
1824                             14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
1825                     };
1826
1827             outputData =
1828                     {
1829                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1830                             0.643914213228014f,
1831                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
1832                             0.236882800924671f,
1833                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
1834                             0.236882800924671f,
1835                             0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1836                             0.087144312427294f,
1837
1838                             0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1839                             0.032058600957022f,
1840                             0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
1841                             0.032058600957022f,
1842                             0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1843                             7.246299848982885e-08f,
1844                             7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1845                             7.246299848982885e-08f, 7.246299848982885e-08f
1846                     };
1847             break;
1848         }
1849     case -3:
1850     case 1:
1851         {
1852             inputShape = {2, 5, 2, 2};
1853
1854             inputData =
1855                     {
1856                             17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1857                             15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
1858                             17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1859                             15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1860                     };
1861
1862             outputData =
1863                     {
1864                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1865                             0.236882800924671f,
1866                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1867                             0.087144312427294f,
1868                             0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1869                             0.032058600957022f,
1870                             0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1871                             7.246299848982885e-08f,
1872
1873
1874                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1875                             0.236882800924671f,
1876                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1877                             0.087144312427294f,
1878                             0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1879                             0.032058600957022f,
1880                             0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1881                             7.246299848982885e-08f
1882                     };
1883             break;
1884         }
1885     case -2:
1886     case 2:
1887         {
1888             inputShape = {2, 2, 5, 2};
1889
1890             inputData =
1891                     {
1892                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1893                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1894                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1895                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1896                     };
1897
1898             outputData =
1899                     {
1900                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1901                             0.087144312427294f,
1902                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1903                             7.246299848982885e-08f,
1904                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1905                             0.087144312427294f,
1906                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1907                             7.246299848982885e-08f,
1908
1909                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1910                             0.087144312427294f,
1911                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1912                             7.246299848982885e-08f,
1913                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1914                             0.087144312427294f,
1915                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1916                             7.246299848982885e-08f
1917                     };
1918             break;
1919         }
1920     case -1:
1921     case 3:
1922         {
1923             inputShape = {2, 2, 2, 5};
1924
1925             inputData =
1926                     {
1927                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1928                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1929                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1930                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1931                     };
1932
1933             outputData =
1934                     {
1935                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1936                             7.246299848982885e-08f,
1937                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1938                             7.246299848982885e-08f,
1939                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1940                             7.246299848982885e-08f,
1941                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1942                             7.246299848982885e-08f,
1943
1944                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1945                             7.246299848982885e-08f,
1946                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1947                             7.246299848982885e-08f,
1948                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1949                             7.246299848982885e-08f,
1950                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1951                             7.246299848982885e-08f
1952                     };
1953             break;
1954         }
1955     }
1956
1957     return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, inputShape,
1958                                                              outputData, inputData, axis);
1959 }
1960
1961 LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1962     armnn::IWorkloadFactory& workloadFactory,
1963     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1964     float beta)
1965 {
1966     return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1967 }
1968
1969 LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1970         armnn::IWorkloadFactory& workloadFactory,
1971         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1972         float beta)
1973 {
1974     Simple3dSoftmaxOutputData data;
1975     return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1976                                                                      data.inputShape, data.outputData, data.inputData);
1977 }
1978
1979 LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1980         armnn::IWorkloadFactory& workloadFactory,
1981         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1982         float beta)
1983 {
1984     Simple4dSoftmaxData data;
1985
1986     return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1987                                                                      data.inputShape, data.outputData, data.inputData);
1988 }
1989
1990 LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1991         armnn::IWorkloadFactory& workloadFactory,
1992         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1993         float beta)
1994 {
1995     return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1996 }
1997
1998 LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1999         armnn::IWorkloadFactory& workloadFactory,
2000         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2001         float beta)
2002 {
2003     Simple3dSoftmaxOutputData data;
2004     return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2005                                                                      data.inputShape, data.outputData, data.inputData);
2006 }
2007
2008 LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
2009         armnn::IWorkloadFactory& workloadFactory,
2010         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2011         float beta)
2012 {
2013     Simple4dSoftmaxData data;
2014
2015     return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2016                                                                      data.inputShape, data.outputData, data.inputData);
2017 }
2018
2019 LayerTestResult<float,4> CompareNormalizationTest(
2020     armnn::IWorkloadFactory& workloadFactory,
2021     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2022     armnn::IWorkloadFactory& refWorkloadFactory,
2023     armnn::NormalizationAlgorithmChannel normChannel,
2024     armnn::NormalizationAlgorithmMethod normMethod)
2025 {
2026     return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
2027 }
2028
2029 LayerTestResult<float,2> CompareSoftmaxTest(
2030     armnn::IWorkloadFactory& workloadFactory,
2031     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2032     armnn::IWorkloadFactory& refWorkloadFactory,
2033     float beta)
2034 {
2035     return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
2036         workloadFactory, memoryManager, refWorkloadFactory, beta);
2037 }
2038
2039 LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
2040     armnn::IWorkloadFactory& workloadFactory,
2041     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2042     armnn::IWorkloadFactory& refWorkloadFactory,
2043     float beta)
2044 {
2045     return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
2046         workloadFactory, memoryManager, refWorkloadFactory, beta);
2047 }
2048
2049 std::vector<LayerTestResult<float,3>> SplitterTest(
2050     armnn::IWorkloadFactory& workloadFactory,
2051     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2052 {
2053     return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
2054 }
2055
2056 std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
2057     armnn::IWorkloadFactory& workloadFactory,
2058     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2059 {
2060     return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
2061 }
2062
2063 std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
2064     armnn::IWorkloadFactory& workloadFactory,
2065     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2066 {
2067     return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2068 }
2069
2070 LayerTestResult<float, 3> CopyViaSplitterTest(
2071     armnn::IWorkloadFactory& workloadFactory,
2072     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2073 {
2074     return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2075 }
2076
2077 LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
2078     armnn::IWorkloadFactory& workloadFactory,
2079     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2080 {
2081     return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
2082 }
2083
2084 LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
2085         armnn::IWorkloadFactory& workloadFactory,
2086         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2087 {
2088     return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2089 }
2090
2091 void LstmUtilsZeroVectorTest()
2092 {
2093     armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
2094     boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2095             {2., 3., 3., 4.}));
2096
2097     boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2098             {0., 0., 0., 0.}));
2099
2100     return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
2101 }
2102
2103 void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
2104 {
2105     uint32_t batchSize = 2;
2106     uint32_t vecSize = 4;
2107     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2108     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2109             { 0.1f, 0.2f, 0.3f, 0.4f,      //batch 0
2110               0.9f, 1.0f, 1.1f, 1.2f }));  //batch 1
2111
2112     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
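         // Each batch is normalised to (x - mean) / stddev: for batch 0 the mean is 0.25 and
         // stddev = sqrt(0.0125) ~= 0.111803, so (0.1 - 0.25) / 0.111803 ~= -1.34164. Batch 1
         // differs only in the final digits because of floating-point rounding.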
2113             { -1.34164071f, -0.447213531f, 0.44721365f,  1.34164071f,      //batch 0
2114               -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f  }));  //batch 1
2115
2116     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2117             vecSize, batchSize, expectedOutput);
2118 }
2119
2120 void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
2121 {
2122     uint32_t batchSize = 2;
2123     uint32_t vecSize = 4;
2124     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2125     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2126             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
2127               0.0f, 0.0f, 0.0f, 0.0f }));  //batch 1
2128
2129     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2130             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
2131               0.0f, 0.0f, 0.0f, 0.0f }));  //batch 1
2132
2133     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2134             vecSize, batchSize, expectedOutput);
2135 }
2136
2137 void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
2138 {
2139     uint32_t batchSize = 2;
2140     uint32_t vecSize = 4;
2141     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2142     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2143             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
2144               0.1f, 0.2f, 0.3f, 0.4f }));  //batch 1
2145
2146     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2147             {         0.0f,          0.0f,        0.0f,        0.0f,      //batch 0
2148               -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f }));  //batch 1
2149
2150     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2151             vecSize, batchSize, expectedOutput);
2152 }
2153
2154
2155 void LstmUtilsVectorBatchVectorCwiseProductTest()
2156 {
2157     uint32_t batchSize = 4;
2158     uint32_t vecSize = 29;
2159     armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2160     boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2161             {   1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f, 10.1f,
2162               11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
2163               21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,     0.0f}));
2164
2165     armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2166     boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2167             { /* batch 0 */
2168                 1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f,  10.1f,
2169               11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f,  20.2f,
2170               21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,   0.0f,
2171               /* batch 1 */
2172                 -1.1f,   -2.2f,   -3.3f,   -4.4f,   -5.5f,   -6.6f,   -7.7f,   -8.8f,   -9.9f, -10.1f,
2173               -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
2174               -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f,    0.0f,
2175               /* batch 2 */
2176                 1.1f,   -2.2f,   3.3f,   -4.4f,   5.5f,   -6.6f,   7.7f,   -8.8f,   9.9f, -10.1f,
2177               11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
2178               21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f,   0.0f,
2179               /* batch 3 */
2180                 -1.1f,   2.2f,   -3.3f,   4.4f,   -5.5f,   6.6f,   -7.7f,   8.8f,   -9.9f, 10.1f,
2181               -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
2182               -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f,    0.0f}));
2183
2184     // Expect output[b][i] = vector[i] * batchVector[b][i] (element-wise product, no accumulation).
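         // e.g. batch 0, element 0: 1.1f * 1.1f = 1.21f; batch 1, element 0: 1.1f * -1.1f = -1.21f.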
2185     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2186             { /* batch 0 */
2187                  1.210000f,    4.840000f,   10.889999f,   19.360001f,   30.250000f,   43.559998f,
2188                 59.289997f,   77.440002f,   98.009995f,  102.010010f,  123.432091f,  146.894394f,
2189                172.396896f,  199.939606f,  229.522491f,  261.145599f,  294.808899f,  330.512421f,
2190                368.256134f,  408.040039f,  449.864075f,  493.728363f,  539.632874f,  587.577576f,
2191                637.562500f,  689.587585f,  743.652954f,  799.758423f,    0.000000f,
2192               /* batch 1 */
2193                 -1.210000f,   -4.840000f,  -10.889999f,  -19.360001f,  -30.250000f,  -43.559998f,
2194                -59.289997f,  -77.440002f,  -98.009995f, -102.010010f, -123.432091f, -146.894394f,
2195               -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
2196               -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
2197               -637.562500f, -689.587585f, -743.652954f, -799.758423f,    0.000000f,
2198               /* batch 2 */
2199                  1.210000f,   -4.840000f,  10.889999f,   -19.360001f,   30.250000f,  -43.559998f,
2200                 59.289997f,  -77.440002f,  98.009995f,  -102.010010f,  123.432091f, -146.894394f,
2201                172.396896f, -199.939606f, 229.522491f,  -261.145599f,  294.808899f, -330.512421f,
2202                368.256134f, -408.040039f, 449.864075f,  -493.728363f,  539.632874f, -587.577576f,
2203                637.562500f, -689.587585f, 743.652954f,  -799.758423f,    0.000000f,
2204               /* batch 3 */
2205                 -1.210000f,    4.840000f,  -10.889999f,   19.360001f,  -30.250000f,   43.559998f,
2206                -59.289997f,   77.440002f,  -98.009995f,  102.010010f, -123.432091f,  146.894394f,
2207               -172.396896f,  199.939606f, -229.522491f,  261.145599f, -294.808899f,  330.512421f,
2208               -368.256134f,  408.040039f, -449.864075f,  493.728363f, -539.632874f,  587.577576f,
2209               -637.562500f,  689.587585f, -743.652954f,  799.758423f,    0.000000f}));
2210
2211     return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
2212             vecSize, batchSize, expectedOutput);
2213 }
2214
2215
2216 void LstmUtilsVectorBatchVectorAddTest()
2217 {
2218     uint32_t batchSize = 2;
2219     uint32_t vecSize = 3;
2220     armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2221     boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2222             { 0.0f, -0.5f, 1.0f}));
2223
2224     armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2225     boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2226             { 1.0f, 2.0f, 3.0f,    //batch 0
2227               4.0f, 5.0f, 6.0f})); //batch 1
2228
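         // Expect output[b][i] = vector[i] + batchVector[b][i], e.g. -0.5f + 2.0f = 1.5f.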
2229     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2230             { 1.0f, 1.5f, 4.0f,
2231               4.0f, 4.5f, 7.0f}));
2232
2233     return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
2234             vecSize, batchSize, expectedOutput);
2235 }
2236
2237
2238 LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
2239     armnn::IWorkloadFactory& workloadFactory,
2240     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2241 {
2242     armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
2243     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2244             { 2., 3., 3., 4. }));
2245
2246     armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
2247     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2248             {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2249              -0.42734814f, -0.00478661f,  0.13455015f, -0.03560682f}));
2250     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
2251         workloadFactory, memoryManager, input, expectedOutput);
2252 }
2253
2254 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
2255     armnn::IWorkloadFactory& workloadFactory,
2256     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2257 {
2258     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2259     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2260             {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2261              0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
2262
2263     armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
2264     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2265             {-0.00396806f, 0.029352f,     -0.00279226f, 0.0159977f,   -0.00835576f,
2266              -0.0211779f,  0.0283512f,    -0.0114597f,  0.00907307f,  -0.0244004f,
2267              -0.0152191f,  -0.0259063f,   0.00914318f,  0.00415118f,  0.017147f,
2268              0.0134203f, -0.013869f,    0.0287268f,   -0.00334693f, 0.00733398f,  -0.0287926f,
2269              -0.0186926f,   0.0193662f,   -0.0115437f,  0.00422612f,  -0.0345232f,
2270              0.00223253f,   -0.00957321f, 0.0210624f,   0.013331f,    0.0150954f,
2271              0.02168f}));
2272     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
2273         workloadFactory, memoryManager, input, expectedOutput);
2274 }
2275
2276 LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
2277     armnn::IWorkloadFactory& workloadFactory,
2278     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2279 {
2280     armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
2281     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2282             {2., 3., 3., 4.}));
2283
2284
2285     armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
2286     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2287             {-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
2288              -0.0185422f,   0.11281417f,  0.24466537f, -0.1826292f}));
2289
2290     return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
2291         workloadFactory, memoryManager, input, expectedOutput);
2292 }
2293
2294
2295 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
2296         armnn::IWorkloadFactory& workloadFactory,
2297         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2298 {
2299     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2300     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2301             {0.7f, 0.8f, 0.1f, 0.2f, 0.3f,     //batch 0
2302              0.3f, 0.2f, 0.9f, 0.8f, 0.1f}));  //batch 1
2303
2304     armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
2305     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2306             {  0.0244077f,  0.128027f, -0.00170918f,    //batch 0
2307              -0.00692428f, 0.0848741f,    0.063445f})); //batch 1
2308     return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
2309             workloadFactory, memoryManager, input, expectedOutput);
2310 }
2311
2312
2313 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
2314     armnn::IWorkloadFactory& workloadFactory,
2315     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2316 {
2317     const float qScale = 1.0f;
2318     const int32_t qOffset = 0;
2319
2320     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2321     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2322
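         // QuantizedVector quantises each float to round(x / qScale) + qOffset; with
         // qScale = 1 and qOffset = 0 the values below are stored exactly as int16_t.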
2323     armnn::TensorInfo inputDesc({2, 2}, datatype);
2324     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2325             std::vector<float>({2., 3., 3., 4.})));
2326
2327     armnn::TensorInfo outputDesc({2, 4}, datatype);
2328     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2329             qOffset, std::vector<float>({-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
2330                                          -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f})));
2331
2332     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2333         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2334
2335 }
2336
2337 LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
2338     armnn::IWorkloadFactory& workloadFactory,
2339     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2340 {
2341     const float qScale = 1.0f;
2342     const int32_t qOffset = 0;
2343
2344     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2345     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2346
2347     armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
2348     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2349             std::vector<float>({ 2., 3., 3., 4. })));
2350
2351     armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
2352     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2353             qOffset, std::vector<float>(
2354             {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2355              -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
2356
2357     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2358         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2359 }
2360
2361 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
2362     armnn::IWorkloadFactory& workloadFactory,
2363     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2364 {
2365     const float qScale = 2.0f;
2366     const int32_t qOffset = 0;
2367
2368     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2369     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2370
2371     armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
2372     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2373             qOffset, std::vector<float>(
2374             {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2375              0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
2376
2377     armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
2378     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2379             qOffset, std::vector<float>(
2380             {-0.00396806f,  0.029352f,   -0.00279226f, 0.0159977f,  -0.00835576f,
2381              -0.0211779f,   0.0283512f,  -0.0114597f,  0.00907307f, -0.0244004f,
2382              -0.0152191f,  -0.0259063f,   0.00914318f, 0.00415118f,  0.017147f,
2383               0.0134203f,  -0.013869f,    0.0287268f, -0.00334693f,  0.00733398f, -0.0287926f,
2384              -0.0186926f,   0.0193662f,  -0.0115437f,  0.00422612f, -0.0345232f,
2385               0.00223253f, -0.00957321f,  0.0210624f,  0.013331f,    0.0150954f,   0.02168f})));
2386
2387     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
2388         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2389 }
2390
2391 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2392     armnn::IWorkloadFactory& workloadFactory,
2393     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2394 {
2395     const float qScale = 1.0f;
2396     const int32_t qOffset = 0;
2397
2398     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2399
2400     armnn::TensorInfo inputDesc({2, 2}, datatype);
2401     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2402             qOffset, std::vector<float>({2., 3., 3., 4.})));
2403
2404     armnn::TensorInfo outputDesc({2, 4}, datatype);
2405     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2406             qOffset, std::vector<float>({-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
2407                                          -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f})));
2408
2409     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2410         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2411 }
2412
2413 LayerTestResult<float,3> ConcatTest(
2414     armnn::IWorkloadFactory& workloadFactory,
2415     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2416 {
2417     unsigned int outputWidth = 3;
2418     unsigned int outputHeight = 6;
2419     unsigned int outputChannels = 3;
2420
2421     unsigned int inputWidth1 = 3;
2422     unsigned int inputHeight1 = 6;
2423     unsigned int inputChannels1 = 2;
2424
2425     unsigned int inputWidth2 = 3;
2426     unsigned int inputHeight2 = 6;
2427     unsigned int inputChannels2 = 1;
2428
2429     // Define the tensor descriptors.
2430     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
2431     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
2432     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
2433
2434     LayerTestResult<float,3> ret(outputTensorInfo);
2435
2436     ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2437     {
2438             1.0f, 2.0f, 3.0f,
2439             4.0f, 5.0f, 6.0f,
2440             7.0f, 8.0f, 9.0f,
2441             10.0f, 11.0f, 12.0f,
2442             13.0f, 14.0f, 15.0f,
2443             16.0f, 17.0f, 18.0f,
2444
2445             19.0f, 20.0f, 21.0f,
2446             22.0f, 23.0f, 24.0f,
2447             25.0f, 26.0f, 27.0f,
2448             28.0f, 29.0f, 30.0f,
2449             31.0f, 32.0f, 33.0f,
2450             34.0f, 35.0f, 36.0f,
2451
2452             37.0f, 38.0f, 39.0f,
2453             40.0f, 41.0f, 42.0f,
2454             43.0f, 44.0f, 45.0f,
2455             46.0f, 47.0f, 48.0f,
2456             49.0f, 50.0f, 51.0f,
2457             52.0f, 53.0f, 54.0f,
2458         })
2459     );
2460
2461     auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2462         {
2463             1.0f, 2.0f, 3.0f,
2464             4.0f, 5.0f, 6.0f,
2465             7.0f, 8.0f, 9.0f,
2466             10.0f, 11.0f, 12.0f,
2467             13.0f, 14.0f, 15.0f,
2468             16.0f, 17.0f, 18.0f,
2469
2470             19.0f, 20.0f, 21.0f,
2471             22.0f, 23.0f, 24.0f,
2472             25.0f, 26.0f, 27.0f,
2473             28.0f, 29.0f, 30.0f,
2474             31.0f, 32.0f, 33.0f,
2475             34.0f, 35.0f, 36.0f,
2476         })
2477     );
2478
2479     auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2480         {
2481             37.0f, 38.0f, 39.0f,
2482             40.0f, 41.0f, 42.0f,
2483             43.0f, 44.0f, 45.0f,
2484             46.0f, 47.0f, 48.0f,
2485             49.0f, 50.0f, 51.0f,
2486             52.0f, 53.0f, 54.0f,
2487         })
2488     );
2489
2490     std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // The extent of the window is defined by the size of input[0].
2491     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2492
2493     std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // The extent of the window is defined by the size of input[1].
2494     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
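         // Along the channel dimension (CHW order) input1 occupies output channels 0-1,
         // while input2, with origin {2, 0, 0}, occupies output channel 2.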
2495
2496     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2497
2498     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2499
2500     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
2501         subTensorsSupported ?
2502             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2503             workloadFactory.CreateTensorHandle(inputTensorInfo1);
2504
2505     std::unique_ptr<armnn::ITensorHandle> inputHandle2  =
2506         subTensorsSupported ?
2507             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2508             workloadFactory.CreateTensorHandle(inputTensorInfo2);
2509
2510     armnn::ConcatQueueDescriptor data;
2511     armnn::WorkloadInfo info;
2512     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2513     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2514     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2515
2516     data.m_ViewOrigins.push_back(window1);
2517     data.m_ViewOrigins.push_back(window2);
2518
2519     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
2520
2521     inputHandle1->Allocate();
2522     inputHandle2->Allocate();
2523     outputHandle->Allocate();
2524
2525     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2526     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2527
2528     workload->PostAllocationConfigure();
2529     workload->Execute();
2530
2531     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2532
2533     return ret;
2534 }
2535
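// Element-wise addition of two identically shaped {2, 2, 2, 3} tensors: the inputs and the
// expected output are declared up front, an AdditionQueueDescriptor is bound to the tensor
// handles, and the workload returned by CreateAddition() is executed so that the caller can
// compare ret.output against ret.outputExpected.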
2536 LayerTestResult<float,4> AdditionTest(
2537     armnn::IWorkloadFactory& workloadFactory,
2538     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2539 {
2540     unsigned int batchSize = 2;
2541     unsigned int channels  = 2;
2542     unsigned int height    = 2;
2543     unsigned int width     = 3;
2544
2545     armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2546     armnn::TensorInfo outputTensorInfo;
2547
2548     unsigned int shape[] = {batchSize, channels, height, width};
2549
2550     inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2551     inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2552     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2553
2555     auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
2556         {
2557             0.0f, 2.0f, 1.0f,
2558             0.2f, 1.0f, 2.0f,
2559
2560             1.0f, 2.0f, 1.0f,
2561             0.2f, 1.0f, 2.0f,
2562
2563             0.0f, 2.0f, 1.0f,
2564             4.2f, 1.0f, 2.0f,
2565
2566             0.0f, 0.0f, 1.0f,
2567             0.2f, 1.0f, 2.0f,
2568         }));
2569
2570     auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
2571         {
2572             1.0f, 2.0f, 1.0f,
2573             0.0f, 1.0f, 2.0f,
2574
2575             1.0f, 2.0f, -2.0f,
2576             0.2f, 1.0f, 2.0f,
2577
2578             0.0f, 2.0f, 1.0f,
2579             4.2f, 0.0f, -3.0f,
2580
2581             0.0f, 0.0f, 1.0f,
2582             0.7f, 1.0f, 5.0f,
2583         }));
2584
2585     LayerTestResult<float,4> ret(outputTensorInfo);
2586     ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
2587         {
2588             1.0f, 4.0f, 2.0f,
2589             0.2f, 2.0f, 4.0f,
2590
2591             2.0f, 4.0f, -1.0f,
2592             0.4f, 2.0f, 4.0f,
2593
2594             0.0f, 4.0f, 2.0f,
2595             8.4f, 1.0f, -1.0f,
2596
2597             0.0f, 0.0f, 2.0f,
2598             0.9f, 2.0f, 7.0f,
2599         }));
2600
2601     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2602     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2603     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2604
2605     armnn::AdditionQueueDescriptor data;
2606     armnn::WorkloadInfo info;
2607     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2608     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2609     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2610
2611     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2612
2613     inputHandle1->Allocate();
2614     inputHandle2->Allocate();
2615     outputHandle->Allocate();
2616
2617     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2618     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2619
2620     workload->PostAllocationConfigure();
2621     workload->Execute();
2622
2623     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2624
2625     return ret;
2626 }
2627
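// Exercises NumPy-style broadcasting: shapes {1, 3, 2, 1} and {1, 1, 2, 3} are compatible
// because every dimension pair is either equal or contains a 1, and the output takes the
// larger extent of each pair, giving {1, 3, 2, 3}. For example, the first channel of input1
// holds 0.0f and 1.0f, which are stretched across the width-3 rows of input2, so the first
// expected rows are 0.0f + {0.5f, 1.5f, 2.5f} and 1.0f + {3.5f, 4.5f, 5.5f}.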
2628 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2629 LayerTestResult<T, 4> AdditionBroadcastTestImpl(
2630     armnn::IWorkloadFactory& workloadFactory,
2631     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2632     float qScale,
2633     int32_t qOffset)
2634 {
2635     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
2636     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
2637     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2638
2639     if (armnn::IsQuantizedType<T>())
2640     {
2641         inputTensorInfo1.SetQuantizationScale(qScale);
2642         inputTensorInfo1.SetQuantizationOffset(qOffset);
2643         inputTensorInfo2.SetQuantizationScale(qScale);
2644         inputTensorInfo2.SetQuantizationOffset(qOffset);
2645         outputTensorInfo.SetQuantizationScale(qScale);
2646         outputTensorInfo.SetQuantizationOffset(qOffset);
2647     }
2648
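    // QuantizedVector converts the float literals below into T using the supplied scale and
    // offset (returning them unchanged for non-quantized types such as Float32), so the same
    // literals drive the Float32, Uint8 and Int16 variants of this test.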
2649     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2650         {
2651             0.0f,
2652             1.0f,
2653
2654             2.0f,
2655             3.0f,
2656
2657             4.0f,
2658             5.0f,
2659         }));
2660
2661     auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2662         {
2663             0.5f, 1.5f, 2.5f,
2664             3.5f, 4.5f, 5.5f,
2665         }));
2666
2667     LayerTestResult<T,4> ret(outputTensorInfo);
2668     ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2669         {
2670             0.5f, 1.5f, 2.5f,
2671             4.5f, 5.5f, 6.5f,
2672
2673             2.5f, 3.5f, 4.5f,
2674             6.5f, 7.5f, 8.5f,
2675
2676             4.5f, 5.5f, 6.5f,
2677             8.5f, 9.5f, 10.5f,
2678         }));
2679
2680     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2681     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2682     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2683
2684     armnn::AdditionQueueDescriptor data;
2685     armnn::WorkloadInfo info;
2686     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2687     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2688     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2689
2690     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2691
2692     inputHandle1->Allocate();
2693     inputHandle2->Allocate();
2694     outputHandle->Allocate();
2695
2696     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2697     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2698
2699     workload->PostAllocationConfigure();
2700     workload->Execute();
2701
2702     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2703
2704     return ret;
2705 }
2706
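// The degenerate broadcast case: a one-element {1, 1, 1, 1} tensor is broadcast against
// every element of the {1, 3, 2, 3} input, effectively adding the scalar 0.5f throughout.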
2707 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2708 LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
2709     armnn::IWorkloadFactory& workloadFactory,
2710     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2711     float qScale,
2712     int32_t qOffset)
2713 {
2714     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2715     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
2716     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2717
2718     if (armnn::IsQuantizedType<T>())
2719     {
2720         inputTensorInfo1.SetQuantizationScale(qScale);
2721         inputTensorInfo1.SetQuantizationOffset(qOffset);
2722         inputTensorInfo2.SetQuantizationScale(qScale);
2723         inputTensorInfo2.SetQuantizationOffset(qOffset);
2724         outputTensorInfo.SetQuantizationScale(qScale);
2725         outputTensorInfo.SetQuantizationOffset(qOffset);
2726     }
2727
2728     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2729         {
2730              0.0f,  1.0f,  2.0f,
2731              3.0f,  4.0f,  5.0f,
2732              6.0f,  7.0f,  8.0f,
2733              9.0f, 10.0f, 11.0f,
2734             12.0f, 13.0f, 14.0f,
2735             15.0f, 16.0f, 17.0f,
2736         }));
2737
2738     auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2739         {
2740             0.5f,
2741         }));
2742
2743     LayerTestResult<T,4> ret(outputTensorInfo);
2744     ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2745         {
2746              0.5f,  1.5f,  2.5f,
2747              3.5f,  4.5f,  5.5f,
2748              6.5f,  7.5f,  8.5f,
2749              9.5f, 10.5f, 11.5f,
2750             12.5f, 13.5f, 14.5f,
2751             15.5f, 16.5f, 17.5f,
2752         }));
2753
2754     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2755     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2756     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2757
2758     armnn::AdditionQueueDescriptor data;
2759     armnn::WorkloadInfo info;
2760     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2761     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2762     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2763
2764     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2765
2766     inputHandle1->Allocate();
2767     inputHandle2->Allocate();
2768     outputHandle->Allocate();
2769
2770     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2771     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2772
2773     workload->PostAllocationConfigure();
2774     workload->Execute();
2775
2776     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2777
2778     return ret;
2779 }
2780
2781 LayerTestResult<float, 4> AdditionBroadcastTest(
2782     armnn::IWorkloadFactory& workloadFactory,
2783     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2784 {
2785     return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2786         workloadFactory, memoryManager, 0.0f, 0);
2787 }
2788
2789 LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2790     armnn::IWorkloadFactory& workloadFactory,
2791     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2792 {
2793     return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2794         workloadFactory, memoryManager, 2.f, 0);
2795 }
2796
2797 LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2798     armnn::IWorkloadFactory& workloadFactory,
2799     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2800 {
2801     return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2802         workloadFactory, memoryManager, 2.f, 0);
2803 }
2804
2805 LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2806     armnn::IWorkloadFactory& workloadFactory,
2807     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2808 {
2809     return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2810         workloadFactory, memoryManager, 0.0f, 0);
2811 }
2812
2813 LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2814     armnn::IWorkloadFactory& workloadFactory,
2815     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2816 {
2817     return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2818         workloadFactory, memoryManager, 0.1333333f, 128);
2819 }
2820
2821 LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2822     armnn::IWorkloadFactory& workloadFactory,
2823     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2824 {
2825     return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2826         workloadFactory, memoryManager, 0.1333333f, 0);
2827 }
2828
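// Cross-backend comparison test: the same randomly generated inputs are run through both
// the factory under test and a reference factory, and the reference result is copied into
// ret.outputExpected. A mismatch therefore implicates the backend implementation rather
// than hand-computed expected data.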
2829 LayerTestResult<float,4> CompareAdditionTest(
2830     armnn::IWorkloadFactory& workloadFactory,
2831     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2832     armnn::IWorkloadFactory& refWorkloadFactory)
2833 {
2834     unsigned int batchSize = 4;
2835     unsigned int channels  = 1;
2836     unsigned int height    = 2;
2837     unsigned int width     = 3;
2838
2839     armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2840     armnn::TensorInfo outputTensorInfo;
2841
2842     unsigned int shape[] = {batchSize, channels, height, width};
2843
2844     inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2845     inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2846     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2847
2848     auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2849     auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2850
2851     LayerTestResult<float,4> ret(outputTensorInfo);
2852
2853     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2854     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2855     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2856
2857     std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2858     std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2859     std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2860
2861     armnn::AdditionQueueDescriptor data;
2862     armnn::WorkloadInfo info;
2863     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2864     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2865     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2866
2867     armnn::AdditionQueueDescriptor refData = data;
2868     armnn::WorkloadInfo refInfo = info;
2869     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2870     SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2871     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2872
2873     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2874     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2875
2876     inputHandle1->Allocate();
2877     inputHandle2->Allocate();
2878     outputHandle->Allocate();
2879     inputHandle1Ref->Allocate();
2880     inputHandle2Ref->Allocate();
2881     outputHandleRef->Allocate();
2882
2883     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2884     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2885     CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2886     CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
2887
2888     workload->PostAllocationConfigure();
2889     workload->Execute();
2890     workloadRef->PostAllocationConfigure();
2891     workloadRef->Execute();
2892
2893     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2894     CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
2895
2896     return ret;
2897 }
2898
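// Shared helper for the division tests below, parameterized over the data type and
// per-tensor quantization parameters so one implementation drives both the Float32 and the
// quantized paths. For quantized types the expected values are given in quantized form:
// with input scales of 1.0 and an output scale of 0.25, a real result of 2.0 is encoded as
// 2.0 / 0.25 = 8, which is why DivisionUint8Test expects 8 where input0/input1 = 2.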
2899 namespace {
2900 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2901 LayerTestResult<T, 4> DivisionTestHelper(
2902     armnn::IWorkloadFactory& workloadFactory,
2903     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2904     const unsigned int shape0[4],
2905     const std::vector<T>& values0,
2906     float scale0,
2907     int32_t offset0,
2908     const unsigned int shape1[4],
2909     const std::vector<T> & values1,
2910     float scale1,
2911     int32_t offset1,
2912     const unsigned int outShape[4],
2913     const std::vector<T> & outValues,
2914     float outScale,
2915     int32_t outOffset)
2916 {
2917     armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
2918     armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
2919     armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
2920
2921     inputTensorInfo0.SetQuantizationScale(scale0);
2922     inputTensorInfo0.SetQuantizationOffset(offset0);
2923
2924     inputTensorInfo1.SetQuantizationScale(scale1);
2925     inputTensorInfo1.SetQuantizationOffset(offset1);
2926
2927     outputTensorInfo.SetQuantizationScale(outScale);
2928     outputTensorInfo.SetQuantizationOffset(outOffset);
2929
2930     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
2931     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
2932
2933     LayerTestResult<T, 4> result(outputTensorInfo);
2934     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
2935
2936     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2937     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2938     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2939
2940     armnn::DivisionQueueDescriptor data;
2941     armnn::WorkloadInfo info;
2942     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
2943     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
2944     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2945
2946     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
2947
2948     inputHandle0->Allocate();
2949     inputHandle1->Allocate();
2950     outputHandle->Allocate();
2951
2952     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2953     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2954
2955     workload->PostAllocationConfigure();
2956     workload->Execute();
2957
2958     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
2959
2960     return result;
2961 }
2962 } // anonymous namespace
2963
2964 LayerTestResult<float,4> DivisionByZeroTest(
2965     armnn::IWorkloadFactory& workloadFactory,
2966     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2967 {
2968     const unsigned int width = 2;
2969     const unsigned int height = 2;
2970     const unsigned int channelCount = 2;
2971     const unsigned int batchSize = 2;
2972
2973     unsigned int shape[] = { batchSize, channelCount, height, width };
2974
2975     std::vector<float> input0({
2976                                 1.f,  1.f,  1.f,  1.f,  0.f, 0.f, 0.f, 0.f,
2977                                -1.f, -1.f, -1.f, -1.f,  5.f, 5.f, 5.f, 5.f });
2978
2979     std::vector<float> input1({
2980                                0.f, 0.f, -0.f, -0.f,  0.f, 0.f, -0.f, -0.f,
2981                                0.f, 0.f, -0.f, -0.f,  5.f, 5.f,  5.f,  5.f });
2982
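    // Expected results follow IEEE 754 float division: a non-zero operand divided by zero
    // yields an infinity whose sign is the product of the operand signs (1/0 = +inf,
    // 1/-0 = -inf, -1/0 = -inf, -1/-0 = +inf), while 0/0 yields NaN. The minus sign on
    // -NAN merely documents which inputs produced it; a NaN carries no meaningful sign.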
2983     std::vector<float> output({
2984                                INFINITY, INFINITY, -INFINITY, -INFINITY,  NAN, NAN, -NAN, -NAN,
2985                                -INFINITY, -INFINITY, INFINITY, INFINITY,  1, 1, 1, 1 });
2986
2987     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2988                                                         memoryManager,
2989                                                         shape, input0, 1.0f, 0,
2990                                                         shape, input1, 1.0f, 0,
2991                                                         shape, output, 1.0f, 0);
2992 }
2993
2994 LayerTestResult<float,4> DivisionTest(
2995     armnn::IWorkloadFactory& workloadFactory,
2996     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2997 {
2998     const unsigned int width = 2;
2999     const unsigned int height = 2;
3000     const unsigned int channelCount = 2;
3001     const unsigned int batchSize = 2;
3002
3003     unsigned int shape[] = { batchSize, channelCount, height, width };
3004
3005     std::vector<float> input0({
3006                                       2,  2,  2,  2,    3,  3,  3,  3,
3007                                       4,  4,  4,  4,    5,  5,  5,  5 });
3008
3009     std::vector<float> input1({
3010                                       1,  1,  1,  1,    2,  2,  2,  2,
3011                                       4,  4,  4,  4,    4,  4,  4,  4 });
3012
3013     std::vector<float> output({
3014                                       2,  2,  2,  2,    1.5,  1.5,  1.5,  1.5,
3015                                       1, 1, 1, 1,  1.25, 1.25, 1.25, 1.25 });
3016
3017
3018     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3019                                                         memoryManager,
3020                                                         shape, input0, 1.0f, 0,
3021                                                         shape, input1, 1.0f, 0,
3022                                                         shape, output, 1.0f, 0);
3023 }
3024
3025 LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
3026     armnn::IWorkloadFactory& workloadFactory,
3027     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3028 {
3029     unsigned int shape0[] = { 1, 2, 2, 2 };
3030     std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3031
3032     unsigned int shape1[] = { 1, 1, 1, 1 };
3033     std::vector<float> input1({ 2 });
3034
3035     std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3036
3037
3038     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3039                                                         memoryManager,
3040                                                         shape0, input0, 1.0f, 0,
3041                                                         shape1, input1, 1.0f, 0,
3042                                                         shape0, output, 1.0f, 0);
3043 }
3044
3045 LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
3046     armnn::IWorkloadFactory& workloadFactory,
3047     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3048 {
3049     unsigned int shape0[] = { 1, 3, 3, 2 };
3050     std::vector<float> input0({
3051                                       1,   4,       3,  8,      5, 12,
3052                                       7,   16,      9, 20,     11, 24,
3053                                       13,  28,     15, 32,     17, 36});
3054
3055     unsigned int shape1[] = { 1, 1, 1, 2 };
3056     std::vector<float> input1({ 1, 2 });
3057
3058     std::vector<float> output({
3059                                       1,   2,      3,  4,      5,  6,
3060                                       7,   8,      9, 10,     11, 12,
3061                                       13, 14,     15, 16,     17, 18});
3062
3063     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3064                                                         memoryManager,
3065                                                         shape0, input0, 1.0f, 0,
3066                                                         shape1, input1, 1.0f, 0,
3067                                                         shape0, output, 1.0f, 0);
3068 }
3069
3070 LayerTestResult<uint8_t,4> DivisionUint8Test(
3071     armnn::IWorkloadFactory& workloadFactory,
3072     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3073 {
3074     const unsigned int width = 2;
3075     const unsigned int height = 2;
3076     const unsigned int channelCount = 2;
3077     const unsigned int batchSize = 2;
3078
3079     unsigned int shape[] = { batchSize, channelCount, height, width };
3080
3081     std::vector<uint8_t> input0({2,  2,  2,  2,    3,  3,  3,  3,
3082                                  4,  4,  4,  4,    5,  5,  5,  5 });
3083
3084     std::vector<uint8_t> input1({1,  1,  1,  1,    2,  2,  2,  2,
3085                                  4,  4,  4,  4,    4,  4,  4,  4 });
3086
3087     std::vector<uint8_t> output({8,  8,  8,  8,    6,  6,  6,  6,
3088                                  4,  4,  4,  4,    5,  5,  5,  5});
3089
3090
3091     return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3092                                                                 memoryManager,
3093                                                                 shape, input0, 1.0f,  0,
3094                                                                 shape, input1, 1.0f,  0,
3095                                                                 shape, output, 0.25f, 0);
3096 }
3097
3098 LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
3099     armnn::IWorkloadFactory& workloadFactory,
3100     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3101 {
3102     unsigned int shape0[] = { 1, 2, 2, 2 };
3103     std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3104
3105     unsigned int shape1[] = { 1, 1, 1, 1 };
3106     std::vector<uint8_t> input1({ 2 });
3107
3108     std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3109
3110     return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3111                                                                 memoryManager,
3112                                                                 shape0, input0, 1.0f, 0,
3113                                                                 shape1, input1, 1.0f, 0,
3114                                                                 shape0, output, 1.0f, 0);
3115 }
3116
3117 LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
3118     armnn::IWorkloadFactory& workloadFactory,
3119     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3120 {
3121     unsigned int shape0[] = { 1, 3, 3, 2 };
3122     std::vector<uint8_t> input0({1,   4,     3,  8,      5,  12,
3123                                  7,   16,    9,  20,     11, 24,
3124                                  13,  28,    15, 32,     17, 36});
3125
3126     unsigned int shape1[] = { 1, 1, 1, 2 };
3127     std::vector<uint8_t> input1({ 1, 2 });
3128
3129     std::vector<uint8_t> output({1,   2,      3,  4,      5,  6,
3130                                  7,   8,      9, 10,     11, 12,
3131                                  13, 14,     15, 16,     17, 18});
3132
3133     return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3134                                                                 memoryManager,
3135                                                                 shape0, input0, 1.0f, 0,
3136                                                                 shape1, input1, 1.0f, 0,
3137                                                                 shape0, output, 1.0f, 0);
3138 }
3139
3140 LayerTestResult<int16_t,4> DivisionInt16Test(
3141     armnn::IWorkloadFactory& workloadFactory,
3142     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3143 {
3144     unsigned int shape[] = { 2, 2, 2, 2 };
3145
3146     std::vector<int16_t> input0({2,  2,  2,  2,    3,  3,  3,  3,
3147                                  4,  4,  4,  4,    5,  5,  5,  5 });
3148
3149     std::vector<int16_t> input1({1,  1,  1,  1,    2,  2,  2,  2,
3150                                  4,  4,  4,  4,    4,  4,  4,  4 });
3151
3152     std::vector<int16_t> output({8,  8,  8,  8,    6,  6,  6,  6,
3153                                  4,  4,  4,  4,    5,  5,  5,  5});
3154
3155
3156     return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3157                                                                 memoryManager,
3158                                                                 shape, input0, 1.0f,  0,
3159                                                                 shape, input1, 1.0f,  0,
3160                                                                 shape, output, 0.25f, 0);
3161 }
3162
3163 LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
3164     armnn::IWorkloadFactory& workloadFactory,
3165     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3166 {
3167     unsigned int shape0[] = { 1, 2, 2, 2 };
3168     std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3169
3170     unsigned int shape1[] = { 1, 1, 1, 1 };
3171     std::vector<int16_t> input1({ 2 });
3172
3173     std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3174
3175     return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3176                                                                 memoryManager,
3177                                                                 shape0, input0, 1.0f, 0,
3178                                                                 shape1, input1, 1.0f, 0,
3179                                                                 shape0, output, 1.0f, 0);
3180 }
3181
3182 LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
3183     armnn::IWorkloadFactory& workloadFactory,
3184     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3185 {
3186     unsigned int shape0[] = { 1, 3, 3, 2 };
3187     std::vector<int16_t> input0({1,   4,     3,  8,      5,  12,
3188                                  7,   16,    9,  20,     11, 24,
3189                                  13,  28,    15, 32,     17, 36});
3190
3191     unsigned int shape1[] = { 1, 1, 1, 2 };
3192     std::vector<int16_t> input1({ 1, 2 });
3193
3194     std::vector<int16_t> output({1,   2,      3,  4,      5,  6,
3195                                  7,   8,      9, 10,     11, 12,
3196                                  13, 14,     15, 16,     17, 18});
3197
3198     return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3199                                                                 memoryManager,
3200                                                                 shape0, input0, 1.0f, 0,
3201                                                                 shape1, input1, 1.0f, 0,
3202                                                                 shape0, output, 1.0f, 0);
3203 }
3204
3205 // Primary template: deliberately declared but not defined, since only the explicit
3206 // specializations below provide implementations. Instantiating it with an unsupported
3207 // descriptor type therefore fails at link time instead of recursing at run time.
3208 template<typename DescriptorType>
3209 std::unique_ptr<armnn::IWorkload> CreateWorkload(
3210     const armnn::IWorkloadFactory& workloadFactory,
3211     const armnn::WorkloadInfo& info,
3212     const DescriptorType& descriptor);
3213
3214 template<>
3215 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
3216     const armnn::IWorkloadFactory& workloadFactory,
3217     const armnn::WorkloadInfo& info,
3218     const armnn::MaximumQueueDescriptor& descriptor)
3219 {
3220     return workloadFactory.CreateMaximum(descriptor, info);
3221 }
3222
3223 template<>
3224 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
3225     const armnn::IWorkloadFactory& workloadFactory,
3226     const armnn::WorkloadInfo& info,
3227     const armnn::MinimumQueueDescriptor& descriptor)
3228 {
3229     return workloadFactory.CreateMinimum(descriptor, info);
3230 }
3231
3232 template<>
3233 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
3234         const armnn::IWorkloadFactory& workloadFactory,
3235         const armnn::WorkloadInfo& info,
3236         const armnn::EqualQueueDescriptor& descriptor)
3237 {
3238     return workloadFactory.CreateEqual(descriptor, info);
3239 }
3240
3241 template<>
3242 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
3243         const armnn::IWorkloadFactory& workloadFactory,
3244         const armnn::WorkloadInfo& info,
3245         const armnn::GreaterQueueDescriptor& descriptor)
3246 {
3247     return workloadFactory.CreateGreater(descriptor, info);
3248 }
3249
3250 namespace {
3251
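// Generic driver for the binary elementwise tests (Equal, Greater, Maximum, Minimum).
// The workload is obtained through CreateWorkload<Descriptor>, which resolves to one of
// the explicit specializations above, so supporting a further elementwise operation only
// needs one more specialization. A sketch, using a hypothetical FooQueueDescriptor and
// CreateFoo() factory method purely for illustration:
//
//     template<>
//     std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::FooQueueDescriptor>(
//         const armnn::IWorkloadFactory& workloadFactory,
//         const armnn::WorkloadInfo& info,
//         const armnn::FooQueueDescriptor& descriptor)
//     {
//         return workloadFactory.CreateFoo(descriptor, info);
//     }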
3252 template <typename Descriptor,
3253           armnn::DataType ArmnnTypeInput,
3254           armnn::DataType ArmnnTypeOutput,
3255           typename TInput = armnn::ResolveType<ArmnnTypeInput>,
3256           typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
3257 LayerTestResult<TOutput, 4> ElementwiseTestHelper(
3258     armnn::IWorkloadFactory & workloadFactory,
3259     const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
3260     const unsigned int shape0[4], std::vector<TInput> values0,
3261     const unsigned int shape1[4], std::vector<TInput> values1,
3262     const unsigned int outShape[4], std::vector<TOutput> outValues,
3263     float qScale = 0.0f, int qOffset = 0)
3264 {
3265     const uint32_t dimensionCount = 4;
3266     armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
3267     armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
3268     armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
3269
3270     auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
3271     auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
3272
3273     if (armnn::IsQuantizedType<TInput>())
3274     {
3275         inputTensorInfo0.SetQuantizationScale(qScale);
3276         inputTensorInfo0.SetQuantizationOffset(qOffset);
3277
3278         inputTensorInfo1.SetQuantizationScale(qScale);
3279         inputTensorInfo1.SetQuantizationOffset(qOffset);
3280
3281         outputTensorInfo.SetQuantizationScale(qScale);
3282         outputTensorInfo.SetQuantizationOffset(qOffset);
3283     }
3284
3285     LayerTestResult<TOutput,4> ret(outputTensorInfo);
3286
3287     if (ArmnnTypeOutput == armnn::DataType::Boolean)
3288     {
3289         ret.compareBoolean = true;
3290     }
3291
3292     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3293     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3294     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3295
3296     Descriptor data;
3297     armnn::WorkloadInfo info;
3298     AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3299     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3300     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3301     auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
3302
3303     inputHandle0->Allocate();
3304     inputHandle1->Allocate();
3305     outputHandle->Allocate();
3306
3307     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3308     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3309
3310     workload->PostAllocationConfigure();
3311     ExecuteWorkload(*workload, memoryManager);
3312
3313     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3314
3315     ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
3316     return ret;
3317 }
3318
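// Convenience overload for operations whose input and output element types match
// (e.g. Maximum and Minimum); it forwards to the two-type helper above with ArmnnT
// used for both input and output.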
3319 template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
3320 LayerTestResult<T, 4> ElementwiseTestHelper(
3321     armnn::IWorkloadFactory & workloadFactory,
3322     const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
3323     const unsigned int shape0[4], std::vector<T> values0,
3324     const unsigned int shape1[4], std::vector<T> values1,
3325     const unsigned int outShape[4], std::vector<T> outValues,
3326     float qScale = 0.0f, int qOffset = 0)
3327 {
3328     return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
3329         (workloadFactory,
3330          memoryManager,
3331          shape0,
3332          values0,
3333          shape1,
3334          values1,
3335          outShape,
3336          outValues,
3337          qScale,
3338          qOffset);
3339 }
3340 } // anonymous namespace
3341
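// The comparison tests (Equal, Greater) produce Boolean output tensors: the helper sets
// compareBoolean so results are checked as 0/1 flags, and the test data is declared as
// uint8_t because that is how armnn::DataType::Boolean is represented here.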
3342 LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3343                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3344 {
3345     const unsigned int width = 2;
3346     const unsigned int height = 2;
3347     const unsigned int channelCount = 2;
3348     const unsigned int batchSize = 2;
3349
3350     unsigned int shape[] = { batchSize, channelCount, height, width };
3351
3352     std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
3353                                 3, 3, 3, 3,  4, 4, 4, 4 });
3354
3355     std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
3356                                 5, 5, 5, 5,  4, 4, 4, 4 });
3357
3358     std::vector<uint8_t> output({ 1, 1, 1, 1,  0, 0, 0, 0,
3359                                   0, 0, 0, 0,  1, 1, 1, 1 });
3360
3361     return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3362         workloadFactory,
3363         memoryManager,
3364         shape,
3365         input0,
3366         shape,
3367         input1,
3368         shape,
3369         output);
3370 }
3371
3372 LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
3373         armnn::IWorkloadFactory& workloadFactory,
3374         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3375 {
3376     unsigned int shape0[] = { 1, 2, 2, 2 };
3377     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3378
3379     unsigned int shape1[] = { 1, 1, 1, 1 };
3380     std::vector<float> input1({ 1 });
3381
3382     std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
3383
3384     return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3385         workloadFactory,
3386         memoryManager,
3387         shape0,
3388         input0,
3389         shape1,
3390         input1,
3391         shape0,
3392         output);
3393 }
3394
3395 LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
3396         armnn::IWorkloadFactory& workloadFactory,
3397         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3398 {
3399     const unsigned int shape0[] = { 1, 2, 2, 3 };
3400     const unsigned int shape1[] = { 1, 1, 1, 3 };
3401
3402     std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3403                                 7, 8, 9, 10, 11, 12 });
3404
3405     std::vector<float> input1({ 1, 2, 3});
3406
3407     std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
3408                                   0, 0, 0, 0, 0, 0 });
3409
3410     return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3411         workloadFactory,
3412         memoryManager,
3413         shape0,
3414         input0,
3415         shape1,
3416         input1,
3417         shape0,
3418         output);
3419 }
3420
3421 LayerTestResult<uint8_t, 4> EqualUint8Test(
3422         armnn::IWorkloadFactory& workloadFactory,
3423         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3424 {
3425     unsigned int shape[] = { 2, 2, 2, 2 };
3426
3427     // Scale 1.0 and offset 0 mean the quantized values below equal their dequantized values.
3428     std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3429                                   3, 3, 3, 3, 7, 7, 7, 7 });
3430
3431     std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3432                                   3, 3, 3, 3, 5, 5, 5, 5 });
3433
3434     std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3435                                   1, 1, 1, 1, 0, 0, 0, 0 });
3436
3437     return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3438                                  armnn::DataType::QuantisedAsymm8,
3439                                  armnn::DataType::Boolean>(
3440         workloadFactory,
3441         memoryManager,
3442         shape,
3443         input0,
3444         shape,
3445         input1,
3446         shape,
3447         output,
3448         1.0f,
3449         0);
3450 }
3451
3452 LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3453         armnn::IWorkloadFactory& workloadFactory,
3454         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3455 {
3456     const unsigned int shape0[] = { 1, 2, 2, 3 };
3457     const unsigned int shape1[] = { 1, 1, 1, 1 };
3458
3459     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3460                                   7, 8, 9, 10, 11, 12 });
3461
3462     std::vector<uint8_t> input1({ 1 });
3463
3464     std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3465                                   0, 0, 0, 0, 0, 0 });
3466
3467     return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3468                                  armnn::DataType::QuantisedAsymm8,
3469                                  armnn::DataType::Boolean>(
3470         workloadFactory,
3471         memoryManager,
3472         shape0,
3473         input0,
3474         shape1,
3475         input1,
3476         shape0,
3477         output,
3478         1.0f,
3479         0);
3480 }
3481
3482 LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3483         armnn::IWorkloadFactory& workloadFactory,
3484         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3485 {
3486     const unsigned int shape0[] = { 1, 2, 2, 3 };
3487     const unsigned int shape1[] = { 1, 1, 1, 3 };
3488
3489     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3490                                   7, 8, 9, 10, 11, 12 });
3491
3492     std::vector<uint8_t> input1({ 1, 1, 3});
3493
3494     std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3495                                   0, 0, 0, 0, 0, 0 });
3496
3497     return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3498                                  armnn::DataType::QuantisedAsymm8,
3499                                  armnn::DataType::Boolean>(
3500         workloadFactory,
3501         memoryManager,
3502         shape0,
3503         input0,
3504         shape1,
3505         input1,
3506         shape0,
3507         output,
3508         1.0f,
3509         0);
3510 }
3511
3512 LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3513                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3514 {
3515     const unsigned int width = 2;
3516     const unsigned int height = 2;
3517     const unsigned int channelCount = 2;
3518     const unsigned int batchSize = 2;
3519
3520     unsigned int shape[] = { batchSize, channelCount, height, width };
3521
3522     std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
3523                                 3, 3, 3, 3,  4, 4, 4, 4 });
3524
3525     std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
3526                                 5, 5, 5, 5,  4, 4, 4, 4 });
3527
3528     std::vector<uint8_t> output({ 0, 0, 0, 0,  1, 1, 1, 1,
3529                                   0, 0, 0, 0,  0, 0, 0, 0 });
3530
3531     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3532         workloadFactory,
3533         memoryManager,
3534         shape,
3535         input0,
3536         shape,
3537         input1,
3538         shape,
3539         output);
3540 }
3541
3542 LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
3543         armnn::IWorkloadFactory& workloadFactory,
3544         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3545 {
3546     unsigned int shape0[] = { 1, 2, 2, 2 };
3547     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3548
3549     unsigned int shape1[] = { 1, 1, 1, 1 };
3550     std::vector<float> input1({ 1 });
3551
3552     std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
3553
3554     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3555         workloadFactory,
3556         memoryManager,
3557         shape0,
3558         input0,
3559         shape1,
3560         input1,
3561         shape0,
3562         output);
3563 }
3564
3565 LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
3566         armnn::IWorkloadFactory& workloadFactory,
3567         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3568 {
3569     const unsigned int shape0[] = { 1, 2, 2, 3 };
3570     const unsigned int shape1[] = { 1, 1, 1, 3 };
3571
3572     std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3573                                 7, 8, 9, 10, 11, 12 });
3574
3575     std::vector<float> input1({ 1, 3, 2});
3576
3577     std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3578                                   1, 1, 1, 1, 1, 1 });
3579
3580     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3581         workloadFactory,
3582         memoryManager,
3583         shape0,
3584         input0,
3585         shape1,
3586         input1,
3587         shape0,
3588         output);
3589 }
3590
3591 LayerTestResult<uint8_t, 4> GreaterUint8Test(
3592         armnn::IWorkloadFactory& workloadFactory,
3593         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3594 {
3595     unsigned int shape[] = { 2, 2, 2, 2 };
3596
3597     // Scale 1.0 and offset 0 mean the quantized values below equal their dequantized values.
3598     std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3599                                   3, 3, 3, 3, 5, 5, 5, 5 });
3600
3601     std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3602                                   2, 2, 2, 2, 5, 5, 5, 5 });
3603
3604     std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3605                                   1, 1, 1, 1, 0, 0, 0, 0 });
3606
3607     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3608                                  armnn::DataType::QuantisedAsymm8,
3609                                  armnn::DataType::Boolean>(
3610         workloadFactory,
3611         memoryManager,
3612         shape,
3613         input0,
3614         shape,
3615         input1,
3616         shape,
3617         output,
3618         1.0f,
3619         0);
3620 }
3621
3622 LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3623         armnn::IWorkloadFactory& workloadFactory,
3624         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3625 {
3626     const unsigned int shape0[] = { 1, 2, 2, 3 };
3627     const unsigned int shape1[] = { 1, 1, 1, 1 };
3628
3629     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3630                                   7, 8, 9, 10, 11, 12 });
3631
3632     std::vector<uint8_t> input1({ 1 });
3633
3634     std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3635                                   1, 1, 1, 1, 1, 1 });
3636
3637     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3638                                  armnn::DataType::QuantisedAsymm8,
3639                                  armnn::DataType::Boolean>(
3640         workloadFactory,
3641         memoryManager,
3642         shape0,
3643         input0,
3644         shape1,
3645         input1,
3646         shape0,
3647         output,
3648         1.0f,
3649         0);
3650 }
3651
3652 LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3653         armnn::IWorkloadFactory& workloadFactory,
3654         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3655 {
3656     const unsigned int shape0[] = { 1, 2, 2, 3 };
3657     const unsigned int shape1[] = { 1, 1, 1, 3 };
3658
3659     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3660                                   7, 8, 9, 10, 11, 12 });
3661
3662     std::vector<uint8_t> input1({ 1, 1, 3});
3663
3664     std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3665                                   1, 1, 1, 1, 1, 1 });
3666
3667     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3668                                  armnn::DataType::QuantisedAsymm8,
3669                                  armnn::DataType::Boolean>(
3670         workloadFactory,
3671         memoryManager,
3672         shape0,
3673         input0,
3674         shape1,
3675         input1,
3676         shape0,
3677         output,
3678         1.0f,
3679         0);
3680 }
3681
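// The Maximum and Minimum tests below reuse the same machinery; their output type matches
// the input type, so they go through the single-type ElementwiseTestHelper overload.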
3682 LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3683                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3684 {
3685     const unsigned int width = 2;
3686     const unsigned int height = 2;
3687     const unsigned int channelCount = 2;
3688     const unsigned int batchSize = 2;
3689
3690     unsigned int shape[] = { batchSize, channelCount, height, width };
3691
3692     std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
3693                                 3, 3, 3, 3,  4, 4, 4, 4 });
3694
3695     std::vector<float> input1({ 2, 2, 2, 2,  3, 3, 3, 3,
3696                                 4, 4, 4, 4,  5, 5, 5, 5 });
3697
3698     std::vector<float> output({ 2, 2, 2, 2,  5, 5, 5, 5,
3699                                 4, 4, 4, 4,  5, 5, 5, 5 });
3700
3701     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3702         workloadFactory,
3703         memoryManager,
3704         shape,
3705         input0,
3706         shape,
3707         input1,
3708         shape,
3709         output);
3710 }
3711
3712 LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3713         armnn::IWorkloadFactory& workloadFactory,
3714         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3715 {
3716     unsigned int shape0[] = { 1, 2, 2, 2 };
3717     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3718
3719     unsigned int shape1[] = { 1, 1, 1, 1 };
3720     std::vector<float> input1({ 2 });
3721
3722     std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3723
3724     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3725         workloadFactory,
3726         memoryManager,
3727         shape0,
3728         input0,
3729         shape1,
3730         input1,
3731         shape0,
3732         output);
3733 }
3734
3735 LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3736         armnn::IWorkloadFactory& workloadFactory,
3737         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3738 {
3739     const unsigned int shape0[] = { 1, 2, 2, 3 };
3740     const unsigned int shape1[] = { 1, 1, 1, 3 };
3741
3742     std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3743                                   7, 8, 9, 10, 11, 12 });
3744
3745     std::vector<float> input1({ 1, 2, 3});
3746
3747     std::vector<float> output({ 1, 2, 3, 4, 5, 6,
3748                                 7, 8, 9, 10, 11, 12 });
3749
3750     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3751         workloadFactory,
3752         memoryManager,
3753         shape0,
3754         input0,
3755         shape1,
3756         input1,
3757         shape0,
3758         output);
3759 }
3760
3761 LayerTestResult<uint8_t, 4> MaximumUint8Test(
3762         armnn::IWorkloadFactory& workloadFactory,
3763         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3764 {
3765     unsigned int shape[] = { 2, 2, 2, 2 };
3766
3767     // Scale 1.0 and offset 0 mean the quantized values below equal their dequantized values.
3768     std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3769                                   3, 3, 3, 3, 4, 4, 4, 4 });
3770
3771     std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3772                                   4, 4, 4, 4, 5, 5, 5, 5 });
3773
3774     std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3775                                   4, 4, 4, 4, 5, 5, 5, 5 });
3776
3777     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3778         workloadFactory,
3779         memoryManager,
3780         shape,
3781         input0,
3782         shape,
3783         input1,
3784         shape,
3785         output,
3786         1.0f,
3787         0);
3788 }
3789
3790 LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3791         armnn::IWorkloadFactory& workloadFactory,
3792         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3793 {
3794     const unsigned int shape0[] = { 1, 2, 2, 3 };
3795     const unsigned int shape1[] = { 1, 1, 1, 1 };
3796
3797     std::vector<uint8_t> input0({ 1, 2, 3, 4,  5, 6,
3798                                   7, 8, 9, 10, 11, 12 });
3799
3800     std::vector<uint8_t> input1({2});
3801
3802     std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3803                                   7, 8, 9, 10, 11, 12 });
3804
3805     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3806         workloadFactory,
3807         memoryManager,
3808         shape0,
3809         input0,
3810         shape1,
3811         input1,
3812         shape0,
3813         output,
3814         1.0f,
3815         0);
3816 }
3817
3818 LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3819         armnn::IWorkloadFactory& workloadFactory,
3820         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3821 {
3822     const unsigned int shape0[] = { 1, 2, 2, 3 };
3823     const unsigned int shape1[] = { 1, 1, 1, 3 };
3824
3825     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3826                                   7, 8, 9, 10, 11, 12 });
3827
3828     std::vector<uint8_t> input1({ 1, 10, 3});
3829
3830     std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3831                                   7, 10, 9, 10, 11, 12 });
3832
3833     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3834         workloadFactory,
3835         memoryManager,
3836         shape0,
3837         input0,
3838         shape1,
3839         input1,
3840         shape0,
3841         output,
3842         1.0f,
3843         0);
3844 }
3845
3846 LayerTestResult<int16_t, 4> MaximumInt16Test(
3847     armnn::IWorkloadFactory& workloadFactory,
3848     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3849 {
3850     unsigned int shape[] = { 2, 2, 2, 2 };
3851
3852     std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3853                                   3, 3, 3, 3, 4, 4, 4, 4 });
3854
3855     std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3856                                   4, 4, 4, 4, 5, 5, 5, 5 });
3857
3858     std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3859                                   4, 4, 4, 4, 5, 5, 5, 5 });
3860
3861     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3862         workloadFactory,
3863         memoryManager,
3864         shape,
3865         input0,
3866         shape,
3867         input1,
3868         shape,
3869         output,
3870         1.0f,
3871         0);
3872 }
3873
3874 LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3875     armnn::IWorkloadFactory& workloadFactory,
3876     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3877 {
3878     const unsigned int shape0[] = { 1, 2, 2, 3 };
3879     const unsigned int shape1[] = { 1, 1, 1, 1 };
3880
3881     std::vector<int16_t> input0({ 1, 2, 3, 4,  5, 6,
3882                                   7, 8, 9, 10, 11, 12 });
3883
3884     std::vector<int16_t> input1({2});
3885
3886     std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3887                                   7, 8, 9, 10, 11, 12 });
3888
3889     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3890         workloadFactory,
3891         memoryManager,
3892         shape0,
3893         input0,
3894         shape1,
3895         input1,
3896         shape0,
3897         output,
3898         1.0f,
3899         0);
3900 }
3901
3902 LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3903     armnn::IWorkloadFactory& workloadFactory,
3904     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3905 {
3906     const unsigned int shape0[] = { 1, 2, 2, 3 };
3907     const unsigned int shape1[] = { 1, 1, 1, 3 };
3908
3909     std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3910                                   7, 8, 9, 10, 11, 12 });
3911
3912     std::vector<int16_t> input1({ 1, 10, 3});
3913
3914     std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3915                                   7, 10, 9, 10, 11, 12 });
3916
3917     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3918         workloadFactory,
3919         memoryManager,
3920         shape0,
3921         input0,
3922         shape1,
3923         input1,
3924         shape0,
3925         output,
3926         1.0f,
3927         0);
3928 }
3929
3930 LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3931     armnn::IWorkloadFactory& workloadFactory,
3932     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3933 {
3934     unsigned int shape0[] = { 1, 2, 2, 2 };
3935     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3936
3937     unsigned int shape1[] = { 1, 1, 1, 1 };
3938     std::vector<float> input1({ 2 });
3939
3940     std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3941
3942     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3943         workloadFactory,
3944         memoryManager,
3945         shape0,
3946         input0,
3947         shape1,
3948         input1,
3949         shape0,
3950         output);
3951 }
3952
3954 LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3955     armnn::IWorkloadFactory& workloadFactory,
3956     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3957 {
3958     unsigned int shape0[] = { 1, 2, 2, 2 };
3959     std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3960
3961     unsigned int shape1[] = { 1, 1, 1, 1 };
3962     std::vector<float> input1({ 5 });
3963
3964     std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3965
3966     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3967         workloadFactory,
3968         memoryManager,
3969         shape0,
3970         input0,
3971         shape1,
3972         input1,
3973         shape0,
3974         output);
3975 }
3976
3977 LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3980 {
3981     const unsigned int shape0[] = { 1, 2, 2, 3 };
3982     const unsigned int shape1[] = { 1, 1, 1, 3 };
3983
3984     std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3985                                   7, 1, 2, 3, 4, 5 });
3986
3987     std::vector<uint8_t> input1({ 1, 2, 3});
3988
3989     std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3990                                   1, 1, 2, 1, 2, 3 });
3991
3992     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3993         workloadFactory,
3994         memoryManager,
3995         shape0,
3996         input0,
3997         shape1,
3998         input1,
3999         shape0,
4000         output,
4001         1.0f,
4002         0);
4003 }
4004
4005 LayerTestResult<int16_t, 4> MinimumInt16Test(
4006     armnn::IWorkloadFactory& workloadFactory,
4007     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4008 {
4009     unsigned int shape[] = { 2, 2, 2, 2 };
4010
4011     std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
4012                                   3, 3, 3, 3, 4, 4, 4, 4 });
4013
4014     std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
4015                                   4, 4, 4, 4, 5, 5, 5, 5 });
4016
4017     std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
4018                                   3, 3, 3, 3, 4, 4, 4, 4 });
4019
4020     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4021         workloadFactory,
4022         memoryManager,
4023         shape,
4024         input0,
4025         shape,
4026         input1,
4027         shape,
4028         output,
4029         1.0f,
4030         0);
4031 }
4032
4033 LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
4034     armnn::IWorkloadFactory& workloadFactory,
4035     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4036 {
4037     const unsigned int shape0[] = { 1, 2, 2, 3 };
4038     const unsigned int shape1[] = { 1, 1, 1, 1 };
4039
4040     std::vector<int16_t> input0({ 1, 2, 3, 4,  5, 6,
4041                                   7, 8, 9, 10, 11, 12 });
4042
4043     std::vector<int16_t> input1({2});
4044
4045     std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
4046                                   2, 2, 2, 2, 2, 2 });
4047
4048     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4049         workloadFactory,
4050         memoryManager,
4051         shape0,
4052         input0,
4053         shape1,
4054         input1,
4055         shape0,
4056         output,
4057         1.0f,
4058         0);
4059 }
4060
4061 LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
4062     armnn::IWorkloadFactory& workloadFactory,
4063     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4064 {
4065     const unsigned int shape0[] = { 1, 2, 2, 3 };
4066     const unsigned int shape1[] = { 1, 1, 1, 3 };
4067
4068     std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4069                                   7, 8, 9, 10, 11, 12 });
4070
4071     std::vector<int16_t> input1({ 1, 10, 3});
4072
4073     std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
4074                                   1, 8, 3, 1, 10, 3 });
4075
4076     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4077         workloadFactory,
4078         memoryManager,
4079         shape0,
4080         input0,
4081         shape1,
4082         input1,
4083         shape0,
4084         output,
4085         1.0f,
4086         0);
4087 }
4088
4089 namespace {
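// Runs a Multiplication workload over the given input/output shapes and compares
// the result against outValues; the two input shapes are expected to be
// broadcast-compatible with outShape.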
4090 LayerTestResult<float,4> MultiplicationTestHelper(
4091     armnn::IWorkloadFactory& workloadFactory,
4092     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4093     const unsigned int shape0[4],
4094     const std::vector<float> & values0,
4095     const unsigned int shape1[4],
4096     const std::vector<float> & values1,
4097     const unsigned int outShape[4],
4098     const std::vector<float> & outValues)
4099 {
4100     const uint32_t dimensionCount = 4;
4101     armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
4102     armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
4103     armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
4104
4105     auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
4106     auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
4107
4108     LayerTestResult<float,4> ret(outputTensorInfo);
4109
4110     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
4111     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4112     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4113
4114     armnn::MultiplicationQueueDescriptor data;
4115     armnn::WorkloadInfo info;
4116     AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4117     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4118     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4119
4120     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
4121
4122     inputHandle0->Allocate();
4123     inputHandle1->Allocate();
4124     outputHandle->Allocate();
4125
4126     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
4127     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4128
4129     workload->PostAllocationConfigure();
4130     workload->Execute();
4131
4132     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
4133
4134     ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
4135     return ret;
4136 }
4137 } // anonymous namespace
4138
4140 LayerTestResult<float,4> MultiplicationTest(
4141     armnn::IWorkloadFactory& workloadFactory,
4142     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4143 {
4144     const unsigned int width = 2;
4145     const unsigned int height = 2;
4146     const unsigned int channelCount = 2;
4147     const unsigned int batchSize = 2;
4148
4149     unsigned int shape[] = { batchSize, channelCount, height, width };
4150
4151     std::vector<float> input0({
4152         1,  1,  1,  1,    2,  2,  2,  2,
4153         3,  3,  3,  3,    4,  4,  4,  4 });
4154
4155     std::vector<float> input1({
4156         2,  2,  2,  2,    3,  3,  3,  3,
4157         4,  4,  4,  4,    5,  5,  5,  5 });
4158
4159     std::vector<float> output({
4160         2,  2,  2,  2,    6,  6,  6,  6,
4161         12, 12, 12, 12,  20, 20, 20, 20 });
4162
4163     return MultiplicationTestHelper(workloadFactory,
4164                                     memoryManager,
4165                                     shape,
4166                                     input0,
4167                                     shape,
4168                                     input1,
4169                                     shape,
4170                                     output);
4171 }
4172
4173 LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
4174     armnn::IWorkloadFactory& workloadFactory,
4175     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4176 {
4177     unsigned int shape0[] = { 1, 2, 2, 2 };
4178     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
4179
4180     unsigned int shape1[] = { 1, 1, 1, 1 };
4181     std::vector<float> input1({ 2 });
4182
4183     std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
4184
4185     return MultiplicationTestHelper(workloadFactory,
4186                                     memoryManager,
4187                                     shape0,
4188                                     input0,
4189                                     shape1,
4190                                     input1,
4191                                     shape0,
4192                                     output);
4193 }
4194
4195 LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
4196     armnn::IWorkloadFactory& workloadFactory,
4197     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4198 {
4199     unsigned int shape0[] = { 1, 3, 3, 2 };
4200     std::vector<float> input0({
4201         1,   2,      3,  4,      5,  6,
4202         7,   8,      9, 10,     11, 12,
4203         13, 14,     15, 16,     17, 18});
4204
4205     unsigned int shape1[] = { 1, 1, 1, 2 };
4206     std::vector<float> input1({ 1, 2 });
4207
4208     std::vector<float> output({
4209         1,   4,       3,  8,      5, 12,
4210         7,   16,      9, 20,     11, 24,
4211         13,  28,     15, 32,     17, 36});
4212
4213     return MultiplicationTestHelper(workloadFactory,
4214                                     memoryManager,
4215                                     shape0,
4216                                     input0,
4217                                     shape1,
4218                                     input1,
4219                                     shape0,
4220                                     output);
4221 }
4222
4223 LayerTestResult<float,4> CompareMultiplicationTest(
4224     armnn::IWorkloadFactory& workloadFactory,
4225     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4226     armnn::IWorkloadFactory& refWorkloadFactory)
4227 {
4228     const unsigned int width = 16;
4229     const unsigned int height = 32;
4230     const unsigned int channelCount = 2;
4231     const unsigned int batchSize = 5;
4232
4233     armnn::TensorInfo inputTensorInfo0;
4234     armnn::TensorInfo inputTensorInfo1;
4235     armnn::TensorInfo outputTensorInfo;
4236
4237     constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
4238
4239     inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4240     inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4241     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4242
4243     LayerTestResult<float,4> comparisonResult(outputTensorInfo);
4244
4245     auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
4246     auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
4247
4248     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
4249     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4250     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4251
4252     std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
4253     std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
4254     std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
4255
4256     armnn::MultiplicationQueueDescriptor data;
4257     armnn::WorkloadInfo info;
4258     AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4259     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4260     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4261
4262     armnn::MultiplicationQueueDescriptor refData = data;
4263     armnn::WorkloadInfo refInfo = info;
4264     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
4265     SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
4266     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
4267
4268     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
4269     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
4270
4271     inputHandle0->Allocate();
4272     inputHandle1->Allocate();
4273     outputHandle->Allocate();
4274     inputHandle0Ref->Allocate();
4275     inputHandle1Ref->Allocate();
4276     outputHandleRef->Allocate();
4277
4278     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
4279     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4280     CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
4281     CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
4282
4283     workload->PostAllocationConfigure();
4284     workload->Execute();
4285     workloadRef->PostAllocationConfigure();
4286     workloadRef->Execute();
4287     CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
4288     CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
4289
4290     return comparisonResult;
4291 }
4292
4293 LayerTestResult<float,4> CompareBatchNormTest(
4294     armnn::IWorkloadFactory& workloadFactory,
4295     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4296     armnn::IWorkloadFactory& refWorkloadFactory)
4297 {
4298     const unsigned int width     = 2;
4299     const unsigned int height    = 3;
4300     const unsigned int channels  = 5;
4301     const unsigned int batchSize = 3;
4302
4303     armnn::TensorInfo inputTensorInfo;
4304     armnn::TensorInfo outputTensorInfo;
4305     armnn::TensorInfo tensorInfo;
4306
4307     constexpr unsigned int shape[]       = {batchSize, channels, height, width};
4308     constexpr unsigned int tensorShape[] = {channels};
4309
4310     inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4311     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4312     tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
4313
4314     auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
4315
4316     auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
4317     auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
4318     auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
4319     auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);
4320
4321     LayerTestResult<float,4> ret(outputTensorInfo);
4322
4323     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
4324     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4325
4326     std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
4327     std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
4328
4329     armnn::BatchNormalizationQueueDescriptor data;
4330     armnn::WorkloadInfo info;
4331     armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
4332     armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
4333     armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
4334     armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
4335
4336     AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
4337     AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
4338     AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
4339     AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
4340
4341     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
4342     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4343     data.m_Mean             = &meanTensor;
4344     data.m_Variance         = &varianceTensor;
4345     data.m_Beta             = &betaTensor;
4346     data.m_Gamma            = &gammaTensor;
4347     data.m_Parameters.m_Eps = 0.01f;
4348
4349     armnn::BatchNormalizationQueueDescriptor refData = data;
4350     armnn::WorkloadInfo refInfo = info;
4351     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
4352     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
4353
4354     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
4355     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
4356
4357     inputHandle->Allocate();
4358     outputHandle->Allocate();
4359     inputHandleRef->Allocate();
4360     outputHandleRef->Allocate();
4361
4362     CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4363     CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
4364
4365     workload->PostAllocationConfigure();
4366     workload->Execute();
4367     workloadRef->PostAllocationConfigure();
4368     workloadRef->Execute();
4369
4370     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
4371     CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
4372
4373     return ret;
4374 }
4375
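// Permutes inputData according to mappings by running a Permute workload, writing
// the result to outputData and updating inputTensorInfo to describe the permuted
// tensor.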
4376 template<typename T>
4377 void PermuteTensorData(
4378         armnn::IWorkloadFactory& workloadFactory,
4379         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4380         const armnn::PermutationVector& mappings,
4381         armnn::TensorInfo & inputTensorInfo,
4382         const T * inputData,
4383         std::vector<T>& outputData)
4384 {
4385     BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
4386     if (inputData == nullptr)
4387     {
        // A null inputData pointer is a test error. By returning without doing the
        // permutation we expect the caller to fail the test. It still makes sense
        // to report this as an assert for Debug builds.
4391         return;
4392     }
4393
4394     armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
4395
4396     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4397     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4398
4399     armnn::PermuteQueueDescriptor queueDescriptor;
4400     queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
4401     armnn::WorkloadInfo workloadInfo;
4402     AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
4403     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4404
4405     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
4406
4407     inputHandle->Allocate();
4408     outputHandle->Allocate();
4409
4410     CopyDataToITensorHandle(inputHandle.get(), inputData);
4411
4412     workload->PostAllocationConfigure();
4413     workload->Execute();
4414
4415     outputData.resize(outputTensorInfo.GetNumElements());
4416     CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
4417     inputTensorInfo = outputTensorInfo;
4418 }
4419
4420 armnn::OriginsDescriptor CreateDescriptorForConcatenation(
4421         const std::vector<armnn::TensorInfo> & inputTensorInfos,
4422         unsigned int concatDim)
4423 {
4424     std::vector<armnn::TensorShape> shapes;
4425     shapes.reserve(inputTensorInfos.size());
4426     for (const armnn::TensorInfo& it: inputTensorInfos)
4427     {
4428         shapes.push_back(it.GetShape());
4429     }
4430
4431     return armnn::CreateDescriptorForConcatenation(shapes.begin(),
4432                                                    shapes.end(),
4433                                                    concatDim);
4434 }
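// For example, two { 2, 3 } inputs merged along dimension 0 would be expected to
// produce a descriptor for a { 4, 3 } output with view origins { 0, 0 } and
// { 2, 0 }.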
4435
//
// Concatenation is only supported for the N and C dimensions of NCHW tensors and
// for the innermost dimension. For tensors with fewer than 4 dimensions we need
// to make sure that the concat dimension is either the innermost one or at least
// the third slowest-iterating one.
//
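// For example, per NeedPermuteForConcat below: 1D and 2D inputs always need a
// permute, while 3D inputs need one only when concatenating along the middle
// axis (axis 1); axis 0 and the innermost axis 2 are handled directly.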
4441
4442 bool NeedPermuteForConcat(
4443         const std::vector<armnn::TensorInfo> & inputTensorInfos,
4444         unsigned int concatDim)
4445 {
4446     // See note above. Additionally we expect the input shapes to have the
4447     // same number of dimensions.
4448     unsigned int nDimensions = 0;
4449
    // Determine the number of dimensions, and sanity-check them against
    // test implementation issues.
4452     for (auto && tensorInfo : inputTensorInfos)
4453     {
4454         if (!nDimensions)
4455         {
4456             nDimensions = tensorInfo.GetShape().GetNumDimensions();
4457         }
4458         else
4459         {
4460             BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
4461                 "Input shapes must have the same number of dimensions");
4462         }
4463     }
4464
4465     return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
4466 }
4467
4468 armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
4469 {
4470     unsigned int numDims = inputShape.GetNumDimensions();
4471     if (numDims >= 3)
4472     {
4473         // Nothing to do if the inputShape has at least 3 dimensions.
4474         return inputShape;
4475     }
4476
4477     std::vector<unsigned int> newDims(size_t(3), 1u);
4478     unsigned int expandedBy = 3 - numDims;
4479     for (unsigned int i=0; i<numDims; ++i)
4480     {
4481         newDims[expandedBy+i] = inputShape[i];
4482     }
4483     return armnn::TensorShape(3u, &newDims[0]);
4484 }
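// Illustrative (hypothetical) shapes:
//     ExpandTensorShapeTo3dForPermute({ 2, 3 });    // returns { 1, 2, 3 }
//     ExpandTensorShapeTo3dForPermute({ 5 });       // returns { 1, 1, 5 }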
4485
4486 void Generate3dPermuteVectorForConcat(
4487         unsigned int numDimensions,
4488         unsigned int & concatDim,
4489         std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
4490 {
4491     BOOST_ASSERT_MSG(numDimensions <= 3,
       "Only 1, 2 and 3 dimensions are supported by this helper");
4493     unsigned int expandedBy = 3 - numDimensions;
4494     unsigned int expandedConcatAxis = concatDim + expandedBy;
4495
4496     if (expandedConcatAxis == 2)
4497     {
4498         concatDim = 0;
4499         armnn::PermutationVector forwardPermutation({1, 2, 0});
4500         armnn::PermutationVector reversePermutation({2, 0, 1});
4501         permutations = std::make_pair(forwardPermutation, reversePermutation);
4502     }
4503     else if (expandedConcatAxis == 1)
4504     {
4505         concatDim = 0;
4506         armnn::PermutationVector forwardPermutation({2, 0, 1});
4507         armnn::PermutationVector reversePermutation({1, 2, 0});
4508         permutations = std::make_pair(forwardPermutation, reversePermutation);
4509     }
4510     else
4511     {
4512         BOOST_ASSERT(expandedConcatAxis == 0);
4513         concatDim = 0;
4514     }
4515 }
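// For example, a 2D concatenation along dimension 1 gives expandedConcatAxis == 2,
// so the forward vector {1, 2, 0} (assuming destination-index semantics for
// PermutationVector) moves the concat axis to position 0, and the reverse vector
// {2, 0, 1} undoes it.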
4516
//
// Permutes the input tensors so that we can perform a supported concatenation.
// Tensors with fewer than 3 dimensions are treated as 3D by prepending dummy
// dimensions of size 1. Finally, this function reports the shape of the permuted,
// concatenated output tensor.
//
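// A sketch of the flow: concatenating two { 2, 3 } inputs along dimension 1 should
// give a { 2, 6 } output. Each input is first expanded to { 1, 2, 3 } and permuted
// with {1, 2, 0} to { 3, 1, 2 }, so that the concatenation can run along dimension
// 0 and produce a permuted output of shape { 6, 1, 2 }.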
4523 template <typename T>
4524 void PermuteInputsForConcat(
4525         armnn::IWorkloadFactory& workloadFactory,
4526         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4527         std::vector<armnn::TensorInfo> & inputTensorInfos,
4528         std::vector<T *> & inputData,
4529         std::vector<std::vector<T>> & inputDataStorage,
4530         armnn::PermutationVector & permuteVector,
4531         unsigned int & concatDim,
4532         armnn::TensorInfo & outputTensorInfo)
4533 {
4534     BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
4535         "Expecting more than one tensor to be concatenated here");
4536
4537     unsigned int numDims = 0;
4538     unsigned int nthInput = 0;
4539     const armnn::PermutationVector identity({0, 1, 2});
4540
4541     std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
4542         std::make_pair(identity, identity);
4543
4544     inputDataStorage.resize(inputData.size());
4545
4546     for (auto && tensorInfo : inputTensorInfos)
4547     {
4548         if (numDims == 0)
4549         {
4550             numDims = tensorInfo.GetShape().GetNumDimensions();
4551             Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
4552
            // Store the reverse permutation.
4554             permuteVector = permutations.second;
4555             BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
4556                 "Test logic error, we don't need permutation, so we shouldn't arrive here");
4557         }
4558         else
4559         {
4560             BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
4561                 "All inputs must have the same number of dimensions");
4562         }
4563
4564         armnn::TensorInfo newTensorInfo = tensorInfo;
4565         newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
4566
4567         PermuteTensorData<T>(workloadFactory,
4568                              memoryManager,
4569                              permutations.first,
4570                              newTensorInfo,
4571                              inputData[nthInput],
4572                              inputDataStorage[nthInput]);
4573
4574         inputData[nthInput] = inputDataStorage[nthInput].data();
4575         inputTensorInfos[nthInput] = newTensorInfo;
4576
4577         ++nthInput;
4578     }
4579
4580     outputTensorInfo.SetShape(
4581         armnnUtils::Permuted(
4582             ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
4583             permutations.first));
4584 }
4585
//
// This is the counterpart of PermuteInputsForConcat(...): it permutes the output
// of the concatenation back so that we can check it against an expected output.
//
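// Continuing the sketch above: the { 6, 1, 2 } result is permuted back with the
// stored reverse vector {2, 0, 1} to { 1, 2, 6 }, which matches the expected
// { 2, 6 } output once the leading dummy dimension is ignored.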
4592 template <typename T>
4593 void PermuteOutputForConcat(
4594         armnn::IWorkloadFactory& workloadFactory,
4595         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4596         const armnn::TensorInfo & tensorInfo,
4597         const armnn::PermutationVector & permuteVector,
4598         std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
4599         T * data)
4600 {
4601     BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
4602     if (data == nullptr)
4603     {
        // A null data pointer is a test error. By returning without doing the
        // permutation we expect the caller to fail the test. It still makes sense
        // to report this as an assert for Debug builds.
4607         return;
4608     }
4609
4610     armnn::TensorInfo resultTensorInfo = tensorInfo;
4611     std::vector<T> inputData(tensorInfo.GetNumElements());
4612     std::vector<T> outputData;
4613
4614     CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
4615
4616     PermuteTensorData<T>(workloadFactory,
4617                          memoryManager,
4618                          permuteVector,
4619                          resultTensorInfo,
4620                          &inputData[0],
4621                          outputData);
4622
4623     ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
4624 }
4625
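// Concatenates the given inputs along concatDim into output, permuting the inputs
// first (and the result back afterwards) whenever the requested axis is not
// directly supported, as determined by NeedPermuteForConcat.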
4626 template <typename T>
4627 void Concatenate(
4628     armnn::IWorkloadFactory& workloadFactory,
4629     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4630     std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4631     std::initializer_list<T *> inputsOrig,
4632     const armnn::TensorInfo& outputTensorInfoOrig,
4633     T * output,
4634     unsigned int concatDim,
4635     bool useSubtensor)
4636 {
4637     BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4638     if (output == nullptr)
4639     {
        // A null output pointer is a test error. By returning without doing the
        // concatenation we expect the caller to fail the test. It still makes sense
        // to report this as an assert for Debug builds.
4643         return;
4644     }
4645
    // Save copies of the parameters that we might need to change.
4647     std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4648     std::vector<T *> inputs            = inputsOrig;
4649     armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4650
4651     armnn::PermutationVector permuteVector{0, 1, 2};
4652
4653     // Holds and automatically releases memory for the reshaped input data.
4654     std::vector<std::vector<T>> tmpInputDataStorage;
4655
4656     const size_t inputCount = inputTensorInfos.size();
4657
4658     bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4659
4660     if (needPermuteForConcat)
4661     {
4662         //
4663         // We need to permute the inputs, because concatenation along
4664         // the requested axis is not supported.
4665         //
4666         PermuteInputsForConcat<T>(workloadFactory,
4667                                   memoryManager,
4668                                   inputTensorInfos,
4669                                   inputs,
4670                                   tmpInputDataStorage,
4671                                   permuteVector,
4672                                   concatDim,
4673                                   outputTensorInfo);
4674     }
4675
4676     armnn::WorkloadInfo workloadInfo;
4677
4678     std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4679     inputHandles.reserve(inputCount);
4680
4681     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4682
4683     armnn::ConcatQueueDescriptor queueDescriptor;
4684     armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
4685     queueDescriptor.m_Parameters = viewsDescriptor;
4686
4687     if (useSubtensor)
4688     {
4689         queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4690         for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4691         {
4692             queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4693                 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4694         }
4695
4696         outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4697
4698         const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4699         for (unsigned int i = 0; i < inputCount; ++i)
4700         {
4701             const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4702             std::unique_ptr<armnn::ITensorHandle> inputHandle =
4703                 subTensorsSupported ?
4704                     workloadFactory.CreateSubTensorHandle(*outputHandle,
4705                                                           inputTensorInfo.GetShape(),
4706                                                           queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4707                     workloadFactory.CreateTensorHandle(inputTensorInfo);
4708
4709             inputHandles.emplace_back(std::move(inputHandle));
4710         }
4711
4712     }
4713     else
4714     {
4715         for (unsigned int i = 0; i < inputCount; ++i)
4716         {
4717             std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4718             inputHandles.emplace_back(std::move(inputHandle));
4719         }
4720     }
4721
4722     for (unsigned int i = 0; i < inputCount; ++i)
4723     {
4724         AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
4725     }
4726
4727     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4728
4729     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
4730
4731     for (auto& inputHandle : inputHandles)
4732     {
4733         inputHandle->Allocate();
4734     }
4735
4736     outputHandle->Allocate();
4737
4738     unsigned int nextInputId = 0;
4739     for (auto& inputHandle : inputHandles)
4740     {
4741         CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4742         ++nextInputId;
4743     }
4744
4745     workload->PostAllocationConfigure();
4746     workload->Execute();
4747
4748     if (needPermuteForConcat)
4749     {
4750         PermuteOutputForConcat<T>(workloadFactory,
4751                                   memoryManager,
4752                                   outputTensorInfo,
4753                                   permuteVector,
4754                                   std::move(outputHandle),
4755                                   output);
4756     }
4757     else
4758     {
4759         CopyDataFromITensorHandle(output, outputHandle.get());
4760     }
4761 }
4762
4763 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4764 LayerTestResult<T, 1> Concatenation1dTestImpl(
4765     armnn::IWorkloadFactory& workloadFactory,
4766     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4767     float qScale,
4768     int32_t qOffset)
4769 {
4770     armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
4771
4772     auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4773     auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4774     auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4775
4776     armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
4777
4778     LayerTestResult<T, 1> result(outputTensorInfo);
4779
4780     std::vector<T> output;
4781     output.resize(outputTensorInfo.GetNumElements());
4782     Concatenate<T>(workloadFactory, memoryManager,
4783                    { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4784                    { input0.data(), input1.data(), input2.data() },
4785                    outputTensorInfo,
4786                    output.data(),
4787                    0,
4788                    true);
4789
4790     result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4791     result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4792         1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4793     }));
4794
4795     return result;
4796 }
4797
4798 LayerTestResult<float, 1> Concatenation1dTest(
4799     armnn::IWorkloadFactory& workloadFactory,
4800     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4801 {
4802     return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
4803 }
4804
4805 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4806 LayerTestResult<T, 2> Concatenation2dTestImpl(
4807     armnn::IWorkloadFactory& workloadFactory,
4808     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4809     const armnn::TensorInfo& outputTensorInfo,
4810     unsigned int dimension,
4811     const float qScale,
4812     const int32_t qOffset)
4813 {
4814     armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
4815
4816     auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4817         // Batch 0
4818         1.0f, 2.0f, 3.0f,
4819
4820         // Batch 1
4821         10.0f, 11.0f, 12.0f,
4822     }));
4823
4824     auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4825         // Batch 0
4826         4.0f, 5.0f, 6.0f,
4827
4828         // Batch 1
4829         13.0f, 14.0f, 15.0f,
4830     }));
4831
4832     auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4833         // Batch 0
4834         7.0f, 8.0f, 9.0f,
4835
4836         // Batch 1
4837         16.0f, 17.0f, 18.0f,
4838     }));
4839
4840     LayerTestResult<T, 2> result(outputTensorInfo);
4841
4842     std::vector<T> output;
4843     output.resize(outputTensorInfo.GetNumElements());
4844     Concatenate<T>(workloadFactory, memoryManager,
4845                    { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4846                    { input0.data(), input1.data(), input2.data() },
4847                    outputTensorInfo,
4848                    output.data(),
4849                    dimension,
4850                    true);
4851
4852     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4853     return result;
4854 }
4855
4856 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4857 LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
4858     armnn::IWorkloadFactory& workloadFactory,
4859     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4860     float qScale,
4861     int32_t qOffset)
4862 {
4863     armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
4864
4865     LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4866         workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
4867
4868     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4869         // Batch 0
4870         1.0f, 2.0f, 3.0f,
4871
4872         // Batch 1
4873         10.0f, 11.0f, 12.0f,
4874
4875         // Batch 2
4876         4.0f, 5.0f, 6.0f,
4877
4878         // Batch 3
4879         13.0f, 14.0f, 15.0f,
4880
4881         // Batch 4
4882         7.0f, 8.0f, 9.0f,
4883
4884         // Batch 5
4885         16.0f, 17.0f, 18.0f,
4886     }));
4887
4888     return result;
4889 }
4890
4891 LayerTestResult<float, 2> Concatenation2dDim0Test(
4892     armnn::IWorkloadFactory& workloadFactory,
4893     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4894 {
4895     return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
4896 }
4897
4898 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4899 LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
4900     armnn::IWorkloadFactory& workloadFactory,
4901     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4902     float qScale,
4903     int32_t qOffset)
4904 {
4905     armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
4906
4907     LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4908         workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
4909
4910     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4911         // Batch 0
4912         1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4913
4914         // Batch 1
4915         10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
4916     }));
4917
4918     return result;
4919 }
4920
4921 LayerTestResult<float, 2> Concatenation2dDim1Test(
4922     armnn::IWorkloadFactory& workloadFactory,
4923     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4924 {
4925     return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
4926 }
4927
4928 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4929 LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
4930     armnn::IWorkloadFactory& workloadFactory,
4931     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4932     float qScale,
4933     int32_t qOffset)
4934 {
4935     armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
4936     auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4937         // Batch 0
4938         1.0f, 2.0f, 3.0f,
4939
4940         // Batch 1
4941         10.0f, 11.0f, 12.0f,
4942     }));
4943
4944     armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
4945     auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4946         // Batch 0
4947         4.0f, 5.0f, 6.0f,
4948
4949         // Batch 1
4950         13.0f, 14.0f, 15.0f,
4951
        // Batch 2
4953         7.0f, 8.0f, 9.0f,
4954     }));
4955
4956     armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
4957     auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
4959         16.0f, 17.0f, 18.0f,
4960     }));
4961
4962     armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
4963     LayerTestResult<T, 2> result(outputTensorInfo);
4964
4965     std::vector<T> output;
4966     output.resize(outputTensorInfo.GetNumElements());
4967     Concatenate<T>(workloadFactory, memoryManager,
4968                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4969                    { input0.data(), input1.data(), input2.data() },
4970                    outputTensorInfo,
4971                    output.data(),
4972                    0,
4973                    true);
4974
4975     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4976     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4977         // Batch 0
4978         1.0f, 2.0f, 3.0f,
4979
4980         // Batch 1
4981         10.0f, 11.0f, 12.0f,
4982
4983         // Batch 2
4984         4.0f, 5.0f, 6.0f,
4985
4986         // Batch 3
4987         13.0f, 14.0f, 15.0f,
4988
4989         // Batch 4
4990         7.0f, 8.0f, 9.0f,
4991
4992         // Batch 5
4993         16.0f, 17.0f, 18.0f,
4994     }));
4995
4996     return result;
4997 }
4998
4999 LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
5000     armnn::IWorkloadFactory& workloadFactory,
5001     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5002 {
5003     return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5004         workloadFactory, memoryManager, 0.0f, 0);
5005 }
5006
5007 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5008 LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
5009     armnn::IWorkloadFactory& workloadFactory,
5010     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5011     float qScale,
5012     int32_t qOffset)
5013 {
5014     armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
5015     auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5016         // Batch 0
5017         1.0f, 2.0f, 3.0f,
5018
5019         // Batch 1
5020         10.0f, 11.0f, 12.0f,
5021     }));
5022
5023     armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
5024     auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5025         // Batch 0
5026         4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
5027
5028         // Batch 1
5029         13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
5030     }));
5031
5032     armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
5033     auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5034         // Batch 0
5035         9.0f,
5036
5037         // Batch 1
5038         18.0f
5039     }));
5040
5041     armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
5042     LayerTestResult<T, 2> result(outputTensorInfo);
5043
5044     std::vector<T> output;
5045     output.resize(outputTensorInfo.GetNumElements());
5046     Concatenate<T>(workloadFactory, memoryManager,
5047                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5048                    { input0.data(), input1.data(), input2.data() },
5049                    outputTensorInfo,
5050                    output.data(),
5051                    1,
5052                    true);
5053
5054     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
5055     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5056         // Batch 0
5057         1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
5058
5059         // Batch 1
5060         10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
5061     }));
5062
5063     return result;
5064 }
5065
5066 LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
5067     armnn::IWorkloadFactory& workloadFactory,
5068     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5069 {
5070     return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5071         workloadFactory, memoryManager, 0.0f, 0);
5072 }
5073
5074 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5075 LayerTestResult<T, 3> Concatenation3dTestImpl(
5076     armnn::IWorkloadFactory& workloadFactory,
5077     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5078     const armnn::TensorInfo& outputTensorInfo,
5079     unsigned int dimension,
5080     bool useSubtensor,
5081     float qScale,
5082     int32_t qOffset)
5083 {
5084     armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5085
5086     auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5087         // Batch 0, Channel 0
5088         1.0f, 2.0f,
5089
5090         // Batch 0, Channel 1
5091         3.0f, 4.0f,
5092
5093         // Batch 0, Channel 2
5094         5.0f, 6.0f,
5095
5096         // Batch 1, Channel 0
5097         19.0f, 20.0f,
5098
5099         // Batch 1, Channel 1
5100         21.0f, 22.0f,
5101
5102         // Batch 1, Channel 2
5103         23.0f, 24.0f
5104     }));
5105
5106     auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5107         // Batch 0, Channel 0
5108         7.0f, 8.0f,
5109
5110         // Batch 0, Channel 1
5111         9.0f, 10.0f,
5112
5113         // Batch 0, Channel 2
5114         11.0f, 12.0f,
5115
5116         // Batch 1, Channel 0
5117         25.0f, 26.0f,
5118
5119         // Batch 1, Channel 1
5120         27.0f, 28.0f,
5121
5122         // Batch 1, Channel 2
5123         29.0f, 30.0f
5124     }));
5125
5126     auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5127         // Batch 0, Channel 0
5128         13.0f, 14.0f,
5129
5130         // Batch 0, Channel 1
5131         15.0f, 16.0f,
5132
5133         // Batch 0, Channel 2
5134         17.0f, 18.0f,
5135
5136         // Batch 1, Channel 0
5137         31.0f, 32.0f,
5138
5139         // Batch 1, Channel 1
5140         33.0f, 34.0f,
5141
5142         // Batch 1, Channel 2
5143         35.0f, 36.0f
5144     }));
5145
5146     LayerTestResult<T, 3> result(outputTensorInfo);
5147
5148     std::vector<T> output;
5149     output.resize(outputTensorInfo.GetNumElements());
5150     Concatenate<T>(workloadFactory, memoryManager,
5151                    { inputTensorInfo, inputTensorInfo, inputTensorInfo },
5152                    { input0.data(), input1.data(), input2.data() },
5153                    outputTensorInfo,
5154                    output.data(),
5155                    dimension,
5156                    useSubtensor);
5157
5158     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5159     return result;
5160 }
5161
5162 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5163 LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
5164     armnn::IWorkloadFactory& workloadFactory,
5165     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5166     float qScale,
5167     int32_t qOffset)
5168 {
5169     armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
5170
5171     LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5172         workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
5173
5174     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5175         // Batch 0, Channel 0
5176         1.0f, 2.0f,
5177
5178         // Batch 0, Channel 1
5179         3.0f, 4.0f,
5180
5181         // Batch 0, Channel 2
5182         5.0f, 6.0f,
5183
5184         // Batch 1, Channel 0
5185         19.0f, 20.0f,
5186
5187         // Batch 1, Channel 1
5188         21.0f, 22.0f,
5189
5190         // Batch 1, Channel 2
5191         23.0f, 24.0f,
5192
5193         // Batch 2, Channel 0
5194         7.0f, 8.0f,
5195
5196         // Batch 2, Channel 1
5197         9.0f, 10.0f,
5198
5199         // Batch 2, Channel 2
5200         11.0f, 12.0f,
5201
5202         // Batch 3, Channel 0
5203         25.0f, 26.0f,
5204
5205         // Batch 3, Channel 1
5206         27.0f, 28.0f,
5207
5208         // Batch 3, Channel 2
5209         29.0f, 30.0f,
5210
5211         // Batch 4, Channel 0
5212         13.0f, 14.0f,
5213
5214         // Batch 4, Channel 1
5215         15.0f, 16.0f,
5216
5217         // Batch 4, Channel 2
5218         17.0f, 18.0f,
5219
5220         // Batch 5, Channel 0
5221         31.0f, 32.0f,
5222
5223         // Batch 5, Channel 1
5224         33.0f, 34.0f,
5225
5226         // Batch 5, Channel 2
5227         35.0f, 36.0f
5228     }));
5229
5230     return result;
5231 }
5232
5233 LayerTestResult<float, 3> Concatenation3dDim0Test(
5234     armnn::IWorkloadFactory& workloadFactory,
5235     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5236 {
5237     return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5238 }
5239
5240 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5241 LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
5242     armnn::IWorkloadFactory& workloadFactory,
5243     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5244     float qScale,
5245     int32_t qOffset)
5246 {
5247     armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
5248
5249     LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5250         workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
5251
5252     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5253         // Batch 0, Channel 0
5254         1.0f, 2.0f,
5255
5256         // Batch 0, Channel 1
5257         3.0f, 4.0f,
5258
5259         // Batch 0, Channel 2
5260         5.0f, 6.0f,
5261
5262         // Batch 0, Channel 3
5263         7.0f, 8.0f,
5264
5265         // Batch 0, Channel 4
5266         9.0f, 10.0f,
5267
5268         // Batch 0, Channel 5
5269         11.0f, 12.0f,
5270
5271         // Batch 0, Channel 6
5272         13.0f, 14.0f,
5273
5274         // Batch 0, Channel 7
5275         15.0f, 16.0f,
5276
5277         // Batch 0, Channel 8
5278         17.0f, 18.0f,
5279
5280         // Batch 1, Channel 0
5281         19.0f, 20.0f,
5282
5283         // Batch 1, Channel 1
5284         21.0f, 22.0f,
5285
5286         // Batch 1, Channel 2
5287         23.0f, 24.0f,
5288
5289         // Batch 1, Channel 3
5290         25.0f, 26.0f,
5291
5292         // Batch 1, Channel 4
5293         27.0f, 28.0f,
5294
5295         // Batch 1, Channel 5
5296         29.0f, 30.0f,
5297
5298         // Batch 1, Channel 6
5299         31.0f, 32.0f,
5300
5301         // Batch 1, Channel 7
5302         33.0f, 34.0f,
5303
5304         // Batch 1, Channel 8
5305         35.0f, 36.0f
5306     }));
5307
5308     return result;
5309 }
5310
5311 LayerTestResult<float, 3> Concatenation3dDim1Test(
5312     armnn::IWorkloadFactory& workloadFactory,
5313     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5314 {
5315     return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5316 }
5317
5318 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5319 LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
5320     armnn::IWorkloadFactory& workloadFactory,
5321     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5322     bool useSubtensor,
5323     float qScale,
5324     int32_t qOffset)
5325 {
5326     armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
5327
5328     LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5329         workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
5330
5331     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5332         // Batch 0, Channel 0
5333         1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
5334
5335         // Batch 0, Channel 1
5336         3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
5337
5338         // Batch 0, Channel 2
5339         5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
5340
5341         // Batch 1, Channel 0
5342         19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
5343
5344         // Batch 1, Channel 1
5345         21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
5346
5347         // Batch 1, Channel 2
5348         23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
5349     }));
5350
5351     return result;
5352 }
5353
5354 LayerTestResult<float, 3> Concatenation3dDim2Test(
5355     armnn::IWorkloadFactory& workloadFactory,
5356     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5357     bool useSubtensor)
5358 {
5359     return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
5360         workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
5361 }
5362
5363 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5364 LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
5365     armnn::IWorkloadFactory& workloadFactory,
5366     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5367     float qScale,
5368     int32_t qOffset)
5369 {
5370     armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5371     auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5372             // Batch 0, Channel 0
5373             1.0f, 2.0f,
5374
5375             // Batch 0, Channel 1
5376             3.0f, 4.0f,
5377
5378             // Batch 0, Channel 2
5379             5.0f, 6.0f,
5380
5381             // Batch 1, Channel 0
5382             19.0f, 20.0f,
5383
5384             // Batch 1, Channel 1
5385             21.0f, 22.0f,
5386
5387             // Batch 1, Channel 2
5388             23.0f, 24.0f
5389     }));
5390
5391     armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType, qScale, qOffset);
5392     auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5393             // Batch 0, Channel 0
5394             7.0f, 8.0f,
5395
5396             // Batch 0, Channel 1
5397             9.0f, 10.0f,
5398
5399             // Batch 0, Channel 2
5400             11.0f, 12.0f,
5401     }));
5402
5403     armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType, qScale, qOffset);
5404     auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5405             // Batch 0, Channel 0
5406             25.0f, 26.0f,
5407
5408             // Batch 0, Channel 1
5409             27.0f, 28.0f,
5410
5411             // Batch 0, Channel 2
5412             29.0f, 30.0f,
5413
5414             // Batch 1, Channel 0
5415             13.0f, 14.0f,
5416
5417             // Batch 1, Channel 1
5418             15.0f, 16.0f,
5419
5420             // Batch 1, Channel 2
5421             17.0f, 18.0f,
5422
5423             // Batch 2, Channel 0
5424             31.0f, 32.0f,
5425
5426             // Batch 2, Channel 1
5427             33.0f, 34.0f,
5428
5429             // Batch 2, Channel 2
5430             35.0f, 36.0f
5431     }));
5432
5433     armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
5434     LayerTestResult<T, 3> result(outputTensorInfo);
5435
5436     std::vector<T> output;
5437     output.resize(outputTensorInfo.GetNumElements());
5438     Concatenate<T>(workloadFactory, memoryManager,
5439                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5440                    { input0.data(), input1.data(), input2.data() },
5441                    outputTensorInfo,
5442                    output.data(),
5443                    0,
5444                    true);
5445
5446     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5447     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5448         // Batch 0, Channel 0
5449         1.0f, 2.0f,
5450
5451         // Batch 0, Channel 1
5452         3.0f, 4.0f,
5453
5454         // Batch 0, Channel 2
5455         5.0f, 6.0f,
5456
5457         // Batch 1, Channel 0
5458         19.0f, 20.0f,
5459
5460         // Batch 1, Channel 1
5461         21.0f, 22.0f,
5462
5463         // Batch 1, Channel 2
5464         23.0f, 24.0f,
5465
5466         // Batch 2, Channel 0
5467         7.0f, 8.0f,
5468
5469         // Batch 2, Channel 1
5470         9.0f, 10.0f,
5471
5472         // Batch 2, Channel 2
5473         11.0f, 12.0f,
5474
5475         // Batch 3, Channel 0
5476         25.0f, 26.0f,
5477
5478         // Batch 3, Channel 1
5479         27.0f, 28.0f,
5480
5481         // Batch 3, Channel 2
5482         29.0f, 30.0f,
5483
5484         // Batch 4, Channel 0
5485         13.0f, 14.0f,
5486
5487         // Batch 4, Channel 1
5488         15.0f, 16.0f,
5489
5490         // Batch 4, Channel 2
5491         17.0f, 18.0f,
5492
5493         // Batch 5, Channel 0
5494         31.0f, 32.0f,
5495
5496         // Batch 5, Channel 1
5497         33.0f, 34.0f,
5498
5499         // Batch 5, Channel 2
5500         35.0f, 36.0f
5501     }));
5502
5503     return result;
5504 }
5505
5506 LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5507     armnn::IWorkloadFactory& workloadFactory,
5508     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5509 {
5510     return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5511         workloadFactory, memoryManager, 0.0f, 0);
5512 }
5513
5514 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5515 LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
5516     armnn::IWorkloadFactory& workloadFactory,
5517     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5518     float qScale,
5519     int32_t qOffset)
5520 {
5521     armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5522     auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5523         // Batch 0, Channel 0
5524         1.0f, 2.0f,
5525
5526         // Batch 0, Channel 1
5527         3.0f, 4.0f,
5528
5529         // Batch 0, Channel 2
5530         5.0f, 6.0f,
5531
5532         // Batch 1, Channel 0
5533         19.0f, 20.0f,
5534
5535         // Batch 1, Channel 1
5536         21.0f, 22.0f,
5537
5538         // Batch 1, Channel 2
5539         23.0f, 24.0f
5540     }));
5541
5542     armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
5543     auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5544         // Batch 0, Channel 0
5545         7.0f, 8.0f,
5546
5547         // Batch 0, Channel 1
5548         9.0f, 10.0f,
5549
5550         // Batch 0, Channel 2
5551         11.0f, 12.0f,
5552
5553         // Batch 0, Channel 3
5554         25.0f, 26.0f,
5555
5556         // Batch 1, Channel 0
5557         27.0f, 28.0f,
5558
5559         // Batch 1, Channel 1
5560         29.0f, 30.0f,
5561
5562         // Batch 1, Channel 2
5563         13.0f, 14.0f,
5564
5565         // Batch 1, Channel 3
5566         15.0f, 16.0f,
5567     }));
5568
5569     armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
5570     auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5571         // Batch 0, Channel 0
5572         17.0f, 18.0f,
5573
5574         // Batch 1, Channel 0
5575         31.0f, 32.0f,
5576     }));
5577
5578     armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
5579     LayerTestResult<T, 3> result(outputTensorInfo);
5580
5581     std::vector<T> output;
5582     output.resize(outputTensorInfo.GetNumElements());
5583     Concatenate<T>(workloadFactory, memoryManager,
5584                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5585                    { input0.data(), input1.data(), input2.data() },
5586                    outputTensorInfo,
5587                    output.data(),
5588                    1,
5589                    true);
5590
5591     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5592     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5593         // Batch 0, Channel 0
5594         1.0f, 2.0f,
5595
5596         // Batch 0, Channel 1
5597         3.0f, 4.0f,
5598
5599         // Batch 0, Channel 2
5600         5.0f, 6.0f,
5601
5602         // Batch 0, Channel 3
5603         7.0f, 8.0f,
5604
5605         // Batch 0, Channel 4
5606         9.0f, 10.0f,
5607
5608         // Batch 0, Channel 5
5609         11.0f, 12.0f,
5610
5611         // Batch 0, Channel 6
5612         25.0f, 26.0f,
5613
5614         // Batch 0, Channel 7
5615         17.0f, 18.0f,
5616
5617         // Batch 1, Channel 0
5618         19.0f, 20.0f,
5619
5620         // Batch 1, Channel 1
5621         21.0f, 22.0f,
5622
5623         // Batch 1, Channel 2
5624         23.0f, 24.0f,
5625
5626         // Batch 1, Channel 3
5627         27.0f, 28.0f,
5628
5629         // Batch 1, Channel 4
5630         29.0f, 30.0f,
5631
5632         // Batch 1, Channel 5
5633         13.0f, 14.0f,
5634
5635         // Batch 1, Channel 6
5636         15.0f, 16.0f,
5637
5638         // Batch 1, Channel 7
5639         31.0f, 32.0f,
5640     }));
5641
5642     return result;
5643 }
5644
5645 LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5646     armnn::IWorkloadFactory& workloadFactory,
5647     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5648 {
5649     return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5650         workloadFactory, memoryManager, 0.0f, 0);
5651 }
5652
5653 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5654 LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
5655     armnn::IWorkloadFactory& workloadFactory,
5656     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5657     bool useSubtensor,
5658     float qScale,
5659     int32_t qOffset)
5660 {
5661     armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5662     auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5663         // Batch 0, Channel 0
5664         1.0f, 2.0f,
5665
5666         // Batch 0, Channel 1
5667         3.0f, 4.0f,
5668
5669         // Batch 0, Channel 2
5670         5.0f, 6.0f,
5671
5672         // Batch 1, Channel 0
5673         19.0f, 20.0f,
5674
5675         // Batch 1, Channel 1
5676         21.0f, 22.0f,
5677
5678         // Batch 1, Channel 2
5679         23.0f, 24.0f
5680     }));
5681
5682     armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
5683     auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5684         // Batch 0, Channel 0
5685         7.0f,
5686
5687         // Batch 0, Channel 1
5688         9.0f,
5689
5690         // Batch 0, Channel 2
5691         11.0f,
5692
5693         // Batch 1, Channel 0
5694         25.0f,
5695
5696         // Batch 1, Channel 1
5697         27.0f,
5698
5699         // Batch 1, Channel 2
5700         29.0f
5701     }));
5702
5703     armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
5704     auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5705         // Batch 0, Channel 0
5706         13.0f, 14.0f, 50.0f,
5707
5708         // Batch 0, Channel 1
5709         15.0f, 16.0f, 51.0f,
5710
5711         // Batch 0, Channel 2
5712         17.0f, 18.0f, 52.0f,
5713
5714         // Batch 1, Channel 0
5715         31.0f, 32.0f, 53.0f,
5716
5717         // Batch 1, Channel 1
5718         33.0f, 34.0f, 54.0f,
5719
5720         // Batch 1, Channel 2
5721         35.0f, 36.0f, 55.0f,
5722     }));
5723
5724     armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
5725     LayerTestResult<T, 3> result(outputTensorInfo);
5726
5727     std::vector<T> output;
5728     output.resize(outputTensorInfo.GetNumElements());
5729     Concatenate<T>(workloadFactory, memoryManager,
5730                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5731                    { input0.data(), input1.data(), input2.data() },
5732                    outputTensorInfo,
5733                    output.data(),
5734                    2,
5735                    useSubtensor);
5736
5737     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5738     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5739         // Batch 0, Channel 0
5740         1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
5741
5742         // Batch 0, Channel 1
5743         3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
5744
5745         // Batch 0, Channel 2
5746         5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
5747
5748         // Batch 1, Channel 0
5749         19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
5750
5751         // Batch 1, Channel 1
5752         21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
5753
5754         // Batch 1, Channel 2
5755         23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
5756     }));
5757
5758     return result;
5759 }
5760
5761 LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5762     armnn::IWorkloadFactory& workloadFactory,
5763     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5764     bool useSubtensor)
5765 {
5766     return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5767         workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
5768 }
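// As with the fixed-shape tests above, the DiffInputDims variants concatenate
// inputs whose extents differ only along the target dimension; every other
// dimension must still match for the concatenation to be well-formed.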
5769
5770 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5771 LayerTestResult<T, 4> Concatenation4dTestImpl(
5772     armnn::IWorkloadFactory& workloadFactory,
5773     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5774     const armnn::TensorInfo& outputTensorInfo,
5775     unsigned int dimension,
5776     bool useSubtensor,
5777     float qScale,
5778     int32_t qOffset)
5779 {
5780     armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
5781
5782     auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5783         1.0f, 2.0f,
5784         3.0f, 4.0f,
5785         5.0f, 6.0f,
5786         7.0f, 8.0f,
5787         9.0f, 10.0f,
5788         11.0f, 12.0f
5789     }));
5790
5791     auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5792         11.0f, 12.0f,
5793         13.0f, 14.0f,
5794         15.0f, 16.0f,
5795         17.0f, 18.0f,
5796         19.0f, 20.0f,
5797         21.0f, 22.0f
5798     }));
5799
5800     auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5801         21.0f, 22.0f,
5802         23.0f, 24.0f,
5803         25.0f, 26.0f,
5804         27.0f, 28.0f,
5805         29.0f, 30.0f,
5806         31.0f, 32.0f
5807     }));
5808
5809     LayerTestResult<T, 4> result(outputTensorInfo);
5810
5811     std::vector<T> output;
5812     output.resize(outputTensorInfo.GetNumElements());
5813
5814     Concatenate<T>(workloadFactory,
5815                    memoryManager,
5816                    {inputTensorInfo, inputTensorInfo, inputTensorInfo},
5817                    {input0.data(), input1.data(), input2.data()},
5818                    outputTensorInfo,
5819                    output.data(),
5820                    dimension,
5821                    useSubtensor);
5822
5823     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5824     return result;
5825 }
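// Note: every Concatenation4dDim<N>TestImpl below feeds the same three 1x3x2x2
// inputs through this helper and varies only the concatenation dimension and
// the expected output shape: 3x3x2x2 for dim 0, 1x9x2x2 for dim 1, 1x3x6x2 for
// dim 2 and 1x3x2x6 for dim 3.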
5826
5827 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5828 LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
5829     armnn::IWorkloadFactory& workloadFactory,
5830     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5831     float qScale,
5832     int32_t qOffset)
5833 {
5834     armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
5835
5836     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5837         workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
5838
5839     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5840         1.0f, 2.0f,
5841         3.0f, 4.0f,
5842         5.0f, 6.0f,
5843         7.0f, 8.0f,
5844         9.0f, 10.0f,
5845         11.0f, 12.0f,
5846
5847         11.0f, 12.0f,
5848         13.0f, 14.0f,
5849         15.0f, 16.0f,
5850         17.0f, 18.0f,
5851         19.0f, 20.0f,
5852         21.0f, 22.0f,
5853
5854         21.0f, 22.0f,
5855         23.0f, 24.0f,
5856         25.0f, 26.0f,
5857         27.0f, 28.0f,
5858         29.0f, 30.0f,
5859         31.0f, 32.0f
5860     }));
5861     return result;
5862 }
5863
5864 LayerTestResult<float, 4> Concatenation4dDim0Test(
5865     armnn::IWorkloadFactory& workloadFactory,
5866     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5867 {
5868     return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5869 }
5870
5871 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5872 LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
5873     armnn::IWorkloadFactory& workloadFactory,
5874     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5875     float qScale,
5876     int32_t qOffset)
5877 {
5878     armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
5879
5880     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5881         workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
5882
5883     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5884         1.0f, 2.0f,
5885         3.0f, 4.0f,
5886         5.0f, 6.0f,
5887         7.0f, 8.0f,
5888         9.0f, 10.0f,
5889         11.0f, 12.0f,
5890
5891         11.0f, 12.0f,
5892         13.0f, 14.0f,
5893         15.0f, 16.0f,
5894         17.0f, 18.0f,
5895         19.0f, 20.0f,
5896         21.0f, 22.0f,
5897
5898         21.0f, 22.0f,
5899         23.0f, 24.0f,
5900         25.0f, 26.0f,
5901         27.0f, 28.0f,
5902         29.0f, 30.0f,
5903         31.0f, 32.0f
5904     }));
5905
5906     return result;
5907 }
5908
5909 LayerTestResult<float, 4> Concatenation4dDim1Test(
5910     armnn::IWorkloadFactory& workloadFactory,
5911     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5912 {
5913     return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5914 }
5915
5916 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5917 LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
5918     armnn::IWorkloadFactory& workloadFactory,
5919     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5920     float qScale,
5921     int32_t qOffset)
5922 {
5923     armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
5924
5925     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5926         workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
5927
5928     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5929         1.0f, 2.0f,
5930         3.0f, 4.0f,
5931         11.0f, 12.0f,
5932         13.0f, 14.0f,
5933         21.0f, 22.0f,
5934         23.0f, 24.0f,
5935
5936         5.0f, 6.0f,
5937         7.0f, 8.0f,
5938         15.0f, 16.0f,
5939         17.0f, 18.0f,
5940         25.0f, 26.0f,
5941         27.0f, 28.0f,
5942
5943         9.0f, 10.0f,
5944         11.0f, 12.0f,
5945         19.0f, 20.0f,
5946         21.0f, 22.0f,
5947         29.0f, 30.0f,
5948         31.0f, 32.0f
5949     }));
5950
5951     return result;
5952 }
5953
5954 LayerTestResult<float, 4> Concatenation4dDim2Test(
5955     armnn::IWorkloadFactory& workloadFactory,
5956     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5957 {
5958     return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5959 }
5960
5961 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5962 LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
5963     armnn::IWorkloadFactory& workloadFactory,
5964     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5965     float qScale,
5966     int32_t qOffset,
5967     bool useSubtensor)
5968 {
5969     armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
5970
5971     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5972         workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
5973
5974     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5975         1.0f, 2.0f,
5976         11.0f, 12.0f,
5977         21.0f, 22.0f,
5978         3.0f, 4.0f,
5979         13.0f, 14.0f,
5980         23.0f, 24.0f,
5981
5982         5.0f, 6.0f,
5983         15.0f, 16.0f,
5984         25.0f, 26.0f,
5985         7.0f, 8.0f,
5986         17.0f, 18.0f,
5987         27.0f, 28.0f,
5988
5989         9.0f, 10.0f,
5990         19.0f, 20.0f,
5991         29.0f, 30.0f,
5992         11.0f, 12.0f,
5993         21.0f, 22.0f,
5994         31.0f, 32.0f
5995     }));
5996
5997     return result;
5998 }
5999
6000 LayerTestResult<float, 4> Concatenation4dDim3Test(
6001     armnn::IWorkloadFactory& workloadFactory,
6002     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6003     bool useSubtensor)
6004 {
6005     return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
6006         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
6007 }
6008
6009 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6010 LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
6011     armnn::IWorkloadFactory& workloadFactory,
6012     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6013     float qScale,
6014     int32_t qOffset)
6015 {
6016     unsigned int dimension = 0;
6017     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6018
6019     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6020         1.0f, 2.0f,
6021         3.0f, 4.0f,
6022         5.0f, 6.0f,
6023         7.0f, 8.0f,
6024         9.0f, 10.0f,
6025         11.0f, 12.0f
6026     }));
6027
6028     armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6029
6030     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6031         11.0f, 12.0f,
6032         13.0f, 14.0f,
6033         15.0f, 16.0f,
6034         17.0f, 18.0f,
6035         19.0f, 20.0f,
6036         21.0f, 22.0f,
6037
6038         21.0f, 22.0f,
6039         23.0f, 24.0f,
6040         25.0f, 26.0f,
6041         27.0f, 28.0f,
6042         29.0f, 30.0f,
6043         31.0f, 32.0f
6044
6045     }));
6046
6047     armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6048
6049     LayerTestResult<T, 4> result(outputTensorInfo);
6050
6051     std::vector<T> output;
6052     output.resize(outputTensorInfo.GetNumElements());
6053     Concatenate<T>(workloadFactory,
6054                    memoryManager,
6055                    {inputTensorInfo0, inputTensorInfo1},
6056                    {input0.data(), input1.data()},
6057                    outputTensorInfo,
6058                    output.data(),
6059                    dimension,
6060                    true);
6061
6062     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6063     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6064         1.0f, 2.0f,
6065         3.0f, 4.0f,
6066         5.0f, 6.0f,
6067         7.0f, 8.0f,
6068         9.0f, 10.0f,
6069         11.0f, 12.0f,
6070
6071         11.0f, 12.0f,
6072         13.0f, 14.0f,
6073         15.0f, 16.0f,
6074         17.0f, 18.0f,
6075         19.0f, 20.0f,
6076         21.0f, 22.0f,
6077
6078         21.0f, 22.0f,
6079         23.0f, 24.0f,
6080         25.0f, 26.0f,
6081         27.0f, 28.0f,
6082         29.0f, 30.0f,
6083         31.0f, 32.0f
6084     }));
6085
6086     return result;
6087 }
6088
6089 LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
6090     armnn::IWorkloadFactory& workloadFactory,
6091     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6092 {
6093     return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
6094         workloadFactory, memoryManager, 0.0f, 0);
6095 }
6096
6097 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6098 LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
6099     armnn::IWorkloadFactory& workloadFactory,
6100     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6101     float qScale,
6102     int32_t qOffset)
6103 {
6104     unsigned int dimension = 1;
6105     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6106
6107     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6108         1.0f, 2.0f,
6109         3.0f, 4.0f,
6110         5.0f, 6.0f,
6111         7.0f, 8.0f,
6112         9.0f, 10.0f,
6113         11.0f, 12.0f
6114     }));
6115
6116     armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
6117
6118     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6119         11.0f, 12.0f,
6120         13.0f, 14.0f,
6121         15.0f, 16.0f,
6122         17.0f, 18.0f,
6123
6124     }));
6125
6126     armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
6127
6128     LayerTestResult<T, 4> result(outputTensorInfo);
6129
6130     std::vector<T> output;
6131     output.resize(outputTensorInfo.GetNumElements());
6132     Concatenate<T>(workloadFactory,
6133                    memoryManager,
6134                    {inputTensorInfo0, inputTensorInfo1},
6135                    {input0.data(), input1.data()},
6136                    outputTensorInfo,
6137                    output.data(),
6138                    dimension,
6139                    true);
6140
6141     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6142     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6143         1.0f, 2.0f,
6144         3.0f, 4.0f,
6145         5.0f, 6.0f,
6146         7.0f, 8.0f,
6147         9.0f, 10.0f,
6148         11.0f, 12.0f,
6149         11.0f, 12.0f,
6150         13.0f, 14.0f,
6151         15.0f, 16.0f,
6152         17.0f, 18.0f
6153     }));
6154
6155     return result;
6156 }
6157
6158 LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
6159     armnn::IWorkloadFactory& workloadFactory,
6160     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6161 {
6162     return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
6163         workloadFactory, memoryManager, 0.0f, 0);
6164 }
6165
6166 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6167 LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
6168     armnn::IWorkloadFactory& workloadFactory,
6169     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6170     float qScale,
6171     int32_t qOffset)
6172 {
6173     unsigned int dimension = 2;
6174     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6175
6176     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6177         1.0f, 2.0f,
6178         3.0f, 4.0f,
6179         5.0f, 6.0f,
6180         7.0f, 8.0f,
6181         9.0f, 10.0f,
6182         11.0f, 12.0f
6183     }));
6184
6185     armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
6186
6187     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6188         11.0f, 12.0f,
6189         13.0f, 14.0f,
6190         15.0f, 16.0f,
6191         17.0f, 18.0f,
6192         19.0f, 20.0f,
6193         21.0f, 22.0f,
6194         23.0f, 24.0f,
6195         25.0f, 26.0f,
6196         27.0f, 28.0f
6197     }));
6198
6199     armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
6200
6201     LayerTestResult<T, 4> result(outputTensorInfo);
6202
6203     std::vector<T> output;
6204     output.resize(outputTensorInfo.GetNumElements());
6205     Concatenate<T>(workloadFactory,
6206                    memoryManager,
6207                    {inputTensorInfo0, inputTensorInfo1},
6208                    {input0.data(), input1.data()},
6209                    outputTensorInfo,
6210                    output.data(),
6211                    dimension,
6212                    true);
6213
6214     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6215     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6216         1.0f, 2.0f,
6217         3.0f, 4.0f,
6218         11.0f, 12.0f,
6219         13.0f, 14.0f,
6220         15.0f, 16.0f,
6221
6222         5.0f, 6.0f,
6223         7.0f, 8.0f,
6224         17.0f, 18.0f,
6225         19.0f, 20.0f,
6226         21.0f, 22.0f,
6227
6228         9.0f, 10.0f,
6229         11.0f, 12.0f,
6230         23.0f, 24.0f,
6231         25.0f, 26.0f,
6232         27.0f, 28.0f
6233     }));
6234
6235     return result;
6236 }
6237
6238 LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
6239     armnn::IWorkloadFactory& workloadFactory,
6240     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6241 {
6242     return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
6243         workloadFactory, memoryManager, 0.0f, 0);
6244 }
6245
6246 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6247 LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
6248     armnn::IWorkloadFactory& workloadFactory,
6249     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6250     float qScale,
6251     int32_t qOffset,
6252     bool useSubtensor)
6253 {
6254     unsigned int dimension = 3;
6255     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6256
6257     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6258         1.0f, 2.0f,
6259         3.0f, 4.0f,
6260         5.0f, 6.0f,
6261         7.0f, 8.0f,
6262         9.0f, 10.0f,
6263         11.0f, 12.0f
6264     }));
6265
6266     armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
6267
6268     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6269         11.0f, 12.0f, 13.0f,
6270         14.0f, 15.0f, 16.0f,
6271
6272         17.0f, 18.0f, 19.0f,
6273         20.0f, 21.0f, 22.0f,
6274
6275         23.0f, 24.0f, 25.0f,
6276         26.0f, 27.0f, 28.0f
6277     }));
6278
6279     armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
6280
6281     LayerTestResult<T, 4> result(outputTensorInfo);
6282
6283     std::vector<T> output;
6284     output.resize(outputTensorInfo.GetNumElements());
6285     Concatenate<T>(workloadFactory,
6286                    memoryManager,
6287                    {inputTensorInfo0, inputTensorInfo1},
6288                    {input0.data(), input1.data()},
6289                    outputTensorInfo,
6290                    output.data(),
6291                    dimension,
6292                    useSubtensor);
6293
6294     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6295     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6296         1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
6297         3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
6298         5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
6299         7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
6300         9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
6301         11.0f, 12.0f, 26.0f, 27.0f, 28.0f
6302     }));
6303
6304     return result;
6305 }
6306
6307 LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
6308     armnn::IWorkloadFactory& workloadFactory,
6309     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6310     bool useSubtensor)
6311 {
6312     return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
6313         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
6314 }
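// The 4d DiffShape tests above follow the same pattern: each pairs a 1x3x2x2
// input with a second input whose extent differs only along the concatenation
// dimension (2x3x2x2, 1x2x2x2, 1x3x3x2 and 1x3x2x3 respectively).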
6315
6316 LayerTestResult<float, 2> FakeQuantizationTest(
6317     armnn::IWorkloadFactory& workloadFactory,
6318     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6319 {
6320     constexpr unsigned int width = 2;
6321     constexpr unsigned int height = 3;
6322
6323     const armnn::TensorInfo tensorInfo({ height, width },
6324         armnn::DataType::Float32);
6325     auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6326        -10.0f,  -5.0f,
6327          0.0f,   5.0f,
6328         10.0f,  10.0f
6329     }));
6330
6331     LayerTestResult<float, 2> ret(tensorInfo);
6332
6333     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6334
6335     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6336
6337     armnn::FakeQuantizationQueueDescriptor data;
6338     armnn::WorkloadInfo info;
6339
6340     AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
6341     AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
6342     float min = -10.f;
6343     float max = 10.f;
6344
6345     data.m_Parameters.m_Min = min;
6346     data.m_Parameters.m_Max = max;
6347
6348     armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
6349     armnn::FakeQuantizationQueueDescriptor refData = data;
6350     armnn::WorkloadInfo refInfo = info;
6351     SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
6352
6353     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
6354
6355     inputHandle->Allocate();
6356     outputHandle->Allocate();
6357
6358     CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
6359
6360     workload->PostAllocationConfigure();
6361     workload->Execute();
6362
6363     CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
6364
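    // The expected values below assume the usual fake-quantization behaviour:
    // the [min, max] = [-10, 10] range is mapped onto the 256 uint8 levels and
    // the chosen level is written out as a float, so min maps to 0, the mid
    // point maps to 128 and anything at or above max saturates at 255. The
    // exact rounding of intermediate values such as -5 -> 63 is left to the
    // backend.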
6365     ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6366         0.0f,     63.0f,
6367         128.0f,   191.0f,
6368         255.0f,   255.0f
6369     }));
6370     return ret;
6371 }
6372
6373 namespace
6374 {
6375 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6376 LayerTestResult<T, 4> L2NormalizationTestImpl(
6377     armnn::IWorkloadFactory& workloadFactory,
6378     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6379     const armnn::TensorShape& inputOutputTensorShape,
6380     float scale,
6381     int32_t offset,
6382     const std::vector<float>& inputValues,
6383     float outScale,
6384     int32_t outOffset,
6385     const std::vector<float>& expectedOutputValues,
6386     const armnn::DataLayout layout,
6387     float epsilon = 1e-12f)
6388 {
6389     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
6390     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
6391
6392     // At this point, permute the input data into NHWC order if the test requires it.
6393     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
6394     std::vector<float> inputData = inputValues;
6395     if (layout == armnn::DataLayout::NHWC)
6396     {
6397         std::vector<float> tmp(inputData.size());
6398         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
6399         inputData = tmp;
6400     }
6401
6402     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
6403                                                          inputTensorInfo.GetQuantizationScale(),
6404                                                          inputTensorInfo.GetQuantizationOffset(),
6405                                                          inputData));
6406
6407     std::vector<float> expectedOutputData = expectedOutputValues;
6408     if (layout == armnn::DataLayout::NHWC)
6409     {
6410         std::vector<float> tmp(expectedOutputData.size());
6411         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
6412                             sizeof(float));
6413         expectedOutputData = tmp;
6414     }
6415
6416     LayerTestResult<T, 4> result(outputTensorInfo);
6417     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
6418                                                                outputTensorInfo.GetQuantizationScale(),
6419                                                                outputTensorInfo.GetQuantizationOffset(),
6420                                                                expectedOutputData));
6421
6422     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6423     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6424
6425     armnn::L2NormalizationQueueDescriptor descriptor;
6426     descriptor.m_Parameters.m_Eps = epsilon;
6427     descriptor.m_Parameters.m_DataLayout = layout;
6428     armnn::WorkloadInfo info;
6429
6430     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6431     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6432
6433     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
6434
6435     inputHandle->Allocate();
6436     outputHandle->Allocate();
6437
6438     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6439
6440     workload->PostAllocationConfigure();
6441     ExecuteWorkload(*workload, memoryManager);
6442
6443     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6444
6445     return result;
6446 }
6447
6448 float CalcInvL2Norm(std::initializer_list<float> elements)
6449 {
6450     const float sumOfSquares = std::accumulate(elements.begin(), elements.end(), 0.0f,
6451         [](float acc, float element) { return acc + element * element; });
6452     return 1.0f / sqrtf(sumOfSquares);
6453 }
6454
6455 } // anonymous namespace
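// A minimal usage sketch of CalcInvL2Norm (the values here are illustrative,
// not taken from any test): expected outputs for the L2 normalization tests
// are typically built by scaling each channel value at a given spatial
// position by the reciprocal L2 norm taken across the channels.
//
//     const float invNorm = CalcInvL2Norm({ 1.0f, 2.0f, 3.0f }); // 1 / sqrt(14)
//     const float channel0 = 1.0f * invNorm;
//     const float channel1 = 2.0f * invNorm;
//     const float channel2 = 3.0f * invNorm;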
6456
6457 template<armnn::DataType ArmnnType, typename T>
6458 LayerTestResult<T, 2> Pad2dTestCommon(
6459     armnn::IWorkloadFactory& workloadFactory,
6460     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6461     float qScale,
6462     int32_t qOffset,
6463     const float customPaddingValue)
6464 {
6465     const armnn::TensorShape inputShape{ 3, 3 };
6466     const armnn::TensorShape outputShape{ 7, 7 };
6467
6468     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6469     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
6470
6471     std::vector<T> inputValues(
6472     QuantizedVector<T>(qScale, qOffset,
6473     {
6474       // Height (3) x Width (3)
6475       4, 8, 6,
6476       7, 4, 4,
6477       3, 2, 4
6478     }));
6479
6480     auto p = customPaddingValue;
6481     std::vector<T> expectedOutputValues;
6482     expectedOutputValues =
6483     QuantizedVector<T>(qScale, qOffset,
6484     {
6485       p, p, p, p, p, p, p,
6486       p, p, p, p, p, p, p,
6487       p, p, 4, 8, 6, p, p,
6488       p, p, 7, 4, 4, p, p,
6489       p, p, 3, 2, 4, p, p,
6490       p, p, p, p, p, p, p,
6491       p, p, p, p, p, p, p
6492     });
6493
6494     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
6495
6496     LayerTestResult<T, 2> result(outputTensorInfo);
6497     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
6498
6499     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6500     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6501
6502     armnn::PadQueueDescriptor descriptor;
6503
6504     std::vector<std::pair<unsigned int, unsigned int>> padList;
6505     padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6506     padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6507
6508     descriptor.m_Parameters.m_PadList = padList;
6509     descriptor.m_Parameters.m_PadValue = customPaddingValue;
6510     armnn::WorkloadInfo info;
6511
6512     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6513     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6514
6515     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6516
6517     inputHandle->Allocate();
6518     outputHandle->Allocate();
6519
6520     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
6521
6522     workload->PostAllocationConfigure();
6523     workload->Execute();
6524
6525     CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
6526
6527     return result;
6528 }
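// Each entry of m_PadList above is a (padBefore, padAfter) pair for one
// dimension, so the padded extent along a dimension is
// padBefore + input + padAfter; the 3x3 input with (2,2) on both dimensions
// gives the 7x7 output used above. A minimal sketch of that shape arithmetic
// (PaddedShape is illustrative only and is not used by the tests):
inline std::vector<unsigned int> PaddedShape(
    const std::vector<unsigned int>& inputShape,
    const std::vector<std::pair<unsigned int, unsigned int>>& padList)
{
    std::vector<unsigned int> outputShape;
    outputShape.reserve(inputShape.size());
    for (std::size_t i = 0; i < inputShape.size(); ++i)
    {
        // Output extent = leading padding + original extent + trailing padding.
        outputShape.push_back(padList[i].first + inputShape[i] + padList[i].second);
    }
    return outputShape;
}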
6529
6530 template<armnn::DataType ArmnnType, typename T>
6531 LayerTestResult<T, 3> Pad3dTestCommon(
6532     armnn::IWorkloadFactory& workloadFactory,
6533     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6534     float qScale,
6535     int32_t qOffset)
6536 {
6537     const armnn::TensorShape inputShape{ 2, 2, 2 };
6538     const armnn::TensorShape outputShape{ 3, 5, 6 };
6539
6540     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6541     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
6542
6543     std::vector<T> inputValues(
6544       QuantizedVector<T>(qScale, qOffset,
6545     {
6546         // Channel 0, Height (2) x Width (2)
6547         0, 4,
6548         2, 5,
6549
6550         // Channel 1, Height (2) x Width (2)
6551         6, 1,
6552         5, 2
6553     }));
6554
6555     std::vector<T> expectedOutputValues(
6556       QuantizedVector<T>(qScale, qOffset,
6557     {
6558
6559         0, 0, 0, 0, 0, 0,
6560         0, 0, 0, 0, 0, 0,
6561         0, 0, 0, 4, 0, 0,
6562         0, 0, 2, 5, 0, 0,
6563         0, 0, 0, 0, 0, 0,
6564
6565         0, 0, 0, 0, 0, 0,
6566         0, 0, 0, 0, 0, 0,
6567         0, 0, 6, 1, 0, 0,
6568         0, 0, 5, 2, 0, 0,
6569         0, 0, 0, 0, 0, 0,
6570
6571         0, 0, 0, 0, 0, 0,
6572         0, 0, 0, 0, 0, 0,
6573         0, 0, 0, 0, 0, 0,
6574         0, 0, 0, 0, 0, 0,
6575         0, 0, 0, 0, 0, 0
6576
6577     }));
6578
6579     auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
6580
6581     LayerTestResult<T, 3> result(outputTensorInfo);
6582     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
6583
6584     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6585     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6586
6587     armnn::PadQueueDescriptor descriptor;
6588
6589     std::vector<std::pair<unsigned int, unsigned int>> padList;
6590     padList.push_back(std::pair<unsigned int, unsigned int>(0,1));
6591     padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6592     padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6593
6594     descriptor.m_Parameters.m_PadList = padList;
6595     armnn::WorkloadInfo info;
6596
6597     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6598     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6599
6600     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6601
6602     inputHandle->Allocate();
6603     outputHandle->Allocate();
6604
6605     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
6606
6607     workload->PostAllocationConfigure();
6608     workload->Execute();
6609
6610     CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
6611
6612     return result;
6613 }
6614
6615 template<armnn::DataType ArmnnType, typename T>
6616 LayerTestResult<T, 4> Pad4dTestCommon(
6617     armnn::IWorkloadFactory& workloadFactory,
6618     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6619     float qScale,
6620     int32_t qOffset)
6621 {
6622     const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6623     const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6624
6625     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6626     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
6627
6628     std::vector<T> inputValues(
6629       QuantizedVector<T>(qScale, qOffset,
6630     {
6631         // Batch 0, Channel 0, Height (3) x Width (2)
6632         0, 1,
6633         2, 3,
6634         4, 5,
6635
6636         // Batch 0, Channel 1, Height (3) x Width (2)
6637         6, 7,
6638         8, 9,
6639         10, 11,
6640
6641         // Batch 1, Channel 0, Height (3) x Width (2)
6642         12, 13,
6643         14, 15,
6644         16, 17,
6645
6646         // Batch 1, Channel 1, Height (3) x Width (2)
6647         18, 19,
6648         20, 21,
6649         22, 23
6650     }));
6651
6652     std::vector<T> expectedOutputValues(
6653       QuantizedVector<T>(qScale, qOffset,
6654     {
6655         0, 0, 0, 0,
6656         0, 0, 0, 0,
6657         0, 0, 0, 0,
6658         0, 0, 0, 0,
6659         0, 0, 0, 0,
6660         0, 0, 0, 0,
6661         0, 0, 0, 0,
6662
6663         0, 0, 0, 0,
6664         0, 0, 0, 0,
6665         0, 0, 0, 0,
6666         0, 0, 0, 0,
6667         0, 0, 0, 0,
6668         0, 0, 0, 0,
6669         0, 0, 0, 0,
6670
6671         0, 0, 0, 0,
6672         0, 0, 0, 0,
6673         0, 0, 0, 0,
6674         0, 0, 0, 0,
6675         0, 0, 0, 0,
6676         0, 0, 0, 0,
6677         0, 0, 0, 0,
6678
6679         0, 0, 0, 0,
6680         0, 0, 0, 0,
6681         0, 0, 0, 0,
6682         0, 0, 0, 0,
6683         0, 0, 0, 0,
6684         0, 0, 0, 0,
6685         0, 0, 0, 0,
6686
6687         0, 0, 0, 0,
6688         0, 0, 0, 0,
6689         0, 0, 0, 0,
6690         0, 0, 0, 0,
6691         0, 0, 0, 0,
6692         0, 0, 0, 0,
6693         0, 0, 0, 0,
6694
6695         0, 0, 0, 0,
6696         0, 0, 0, 0,
6697         0, 0, 0, 0,
6698         0, 0, 0, 0,
6699         0, 0, 0, 0,
6700         0, 0, 0, 0,
6701         0, 0, 0, 0,
6702
6703         0, 0, 0, 0,
6704         0, 0, 0, 0,
6705         0, 0, 0, 0,
6706         0, 0, 0, 0,
6707         0, 0, 0, 0,
6708         0, 0, 0, 0,
6709         0, 0, 0, 0,
6710
6711         0, 0, 0, 0,
6712         0, 0, 0, 0,
6713         0, 0, 0, 0,
6714         0, 0, 1, 0,
6715         0, 2, 3, 0,
6716         0, 4, 5, 0,
6717         0, 0, 0, 0,
6718
6719         0, 0, 0, 0,
6720         0, 0, 0, 0,
6721         0, 0, 0, 0,
6722         0, 6, 7, 0,
6723         0, 8, 9, 0,
6724         0, 10, 11, 0,
6725         0, 0, 0, 0,
6726
6727         0, 0, 0, 0,
6728         0, 0, 0, 0,
6729         0, 0, 0, 0,
6730         0, 0, 0, 0,
6731         0, 0, 0, 0,
6732         0, 0, 0, 0,
6733         0, 0, 0, 0,
6734
6735         0, 0, 0, 0,
6736         0, 0, 0, 0,
6737         0, 0, 0, 0,
6738         0, 0, 0, 0,
6739         0, 0, 0, 0,
6740         0, 0, 0, 0,
6741         0, 0, 0, 0,
6742
6743         0, 0, 0, 0,
6744         0, 0, 0, 0,
6745         0, 0, 0, 0,
6746         0, 0, 0, 0,
6747         0, 0, 0, 0,
6748         0, 0, 0, 0,
6749         0, 0, 0, 0,
6750
6751         0, 0, 0, 0,
6752         0, 0, 0, 0,
6753         0, 0, 0, 0,
6754         0, 12, 13, 0,
6755         0, 14, 15, 0,
6756         0, 16, 17, 0,
6757         0, 0, 0, 0,
6758
6759         0, 0, 0, 0,
6760         0, 0, 0, 0,
6761         0, 0, 0, 0,
6762         0, 18, 19, 0,
6763         0, 20, 21, 0,
6764         0, 22, 23, 0,
6765         0, 0, 0, 0,
6766
6767         0, 0, 0, 0,
6768         0, 0, 0, 0,
6769         0, 0, 0, 0,
6770         0, 0, 0, 0,
6771         0, 0, 0, 0,
6772         0, 0, 0, 0,
6773         0, 0, 0, 0,
6774
6775         0, 0, 0, 0,
6776         0, 0, 0, 0,
6777         0, 0, 0, 0,
6778         0, 0, 0, 0,
6779         0, 0, 0, 0,
6780         0, 0, 0, 0,
6781         0, 0, 0, 0,
6782
6783         0, 0, 0, 0,
6784         0, 0, 0, 0,
6785         0, 0, 0, 0,
6786         0, 0, 0, 0,
6787         0, 0, 0, 0,
6788         0, 0, 0, 0,
6789         0, 0, 0, 0,
6790
6791         0, 0, 0, 0,
6792         0, 0, 0, 0,
6793         0, 0, 0, 0,
6794         0, 0, 0, 0,
6795         0, 0, 0, 0,
6796         0, 0, 0, 0,
6797         0, 0, 0, 0,
6798
6799         0, 0, 0, 0,
6800         0, 0, 0, 0,
6801         0, 0, 0, 0,
6802         0, 0, 0, 0,
6803         0, 0, 0, 0,
6804         0, 0, 0, 0,
6805         0, 0, 0, 0,
6806
6807         0, 0, 0, 0,
6808         0, 0, 0, 0,
6809         0, 0, 0, 0,
6810         0, 0, 0, 0,
6811         0, 0, 0, 0,
6812         0, 0, 0, 0,
6813         0, 0, 0, 0
6814     }));
6815
6816     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
6817
6818     LayerTestResult<T, 4> result(outputTensorInfo);
6819     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
6820
6821     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6822     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6823
6824     armnn::PadQueueDescriptor descriptor;
6825
6826     std::vector<std::pair<unsigned int, unsigned int>> padList;
6827     padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6828     padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6829     padList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6830     padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6831
6832     descriptor.m_Parameters.m_PadList = padList;
6833     armnn::WorkloadInfo info;
6834
6835     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6836     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6837
6838     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6839
6840     inputHandle->Allocate();
6841     outputHandle->Allocate();
6842
6843     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6844
6845     workload->PostAllocationConfigure();
6846     workload->Execute();
6847
6848     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6849
6850     return result;
6851 }
6852
6853 LayerTestResult<uint8_t, 2> PadUint82dTest(
6854     armnn::IWorkloadFactory& workloadFactory,
6855     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6856 {
6857     return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
6858 }
6859
6860 LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6861     armnn::IWorkloadFactory& workloadFactory,
6862     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6863 {
6864     return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
6865 }
6866
6867 LayerTestResult<uint8_t, 3> PadUint83dTest(
6868     armnn::IWorkloadFactory& workloadFactory,
6869     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6870 {
6871     return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
6872 }
6873
6874 LayerTestResult<uint8_t, 4> PadUint84dTest(
6875     armnn::IWorkloadFactory& workloadFactory,
6876     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6877 {
6878     return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
6879 }
6880
6881
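// Explicit instantiations of the Pad test templates for QuantisedSymm16.
// Unlike the uint8 and float variants, which go through the named wrapper
// functions above and below, these appear to be referenced directly from other
// translation units, hence the explicit instantiation.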
6882 template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
6883 Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
6884     armnn::IWorkloadFactory& workloadFactory,
6885     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6886     float qScale,
6887     int32_t qOffset,
6888     const float customPaddingValue);
6889
6890 template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
6891 Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
6892     armnn::IWorkloadFactory& workloadFactory,
6893     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6894     float qScale,
6895     int32_t qOffset);
6896
6897 template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
6898 Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
6899     armnn::IWorkloadFactory& workloadFactory,
6900     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6901     float qScale,
6902     int32_t qOffset);
6903
6904 LayerTestResult<float, 2> PadFloat322dTest(
6905     armnn::IWorkloadFactory& workloadFactory,
6906     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6907 {
6908     return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6909 }
6910
6911 LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6912     armnn::IWorkloadFactory& workloadFactory,
6913     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6914 {
6915     return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
6916 }
6917
6918 LayerTestResult<float, 3> PadFloat323dTest(
6919     armnn::IWorkloadFactory& workloadFactory,
6920     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6921 {
6922     return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6923 }
6924
6925 LayerTestResult<float, 4> PadFloat324dTest(
6926     armnn::IWorkloadFactory& workloadFactory,
6927     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6928 {
6929     return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6930 }
6931
6932 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6933 LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6934         armnn::IWorkloadFactory& workloadFactory,
6935         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6936         float scale,
6937         int32_t offset,
6938         float outScale,
6939         int32_t outOffset,
6940         const armnn::DataLayout layout,
6941         float epsilon)
6942 {
6943     // Width: 1
6944     // Height: 1
6945     // Channels: 3
6946     // BatchSize: 1
6947     unsigned int numberOfBatches = 1;
6948     unsigned int numberOfChannels = 3;
6949     unsigned int height = 1;
6950     unsigned int width = 1;
6951
6952     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6953             numberOfBatches, numberOfChannels, height, width, layout);
6954
6955     // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6956     std::vector<float> inputValues
6957     {
6958         // Batch 0, Channel 0, Height (1) x Width (1)
6959         0.00000001f,
6960
6961         // Batch 0, Channel 1, Height (1) x Width (1)
6962         0.00000002f,
6963
6964         // Batch 0, Channel 2, Height (1) x Width (1)
6965         0.00000003f,
6966     };
6967
6968     const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6969     std::vector<float> expectedOutputValues
6970     {
6971         // Batch 0, Channel 0, Height (1) x Width (1)
6972         0.00000001f * approxInvL2Norm,
6973         0.00000002f * approxInvL2Norm,
6974         0.00000003f * approxInvL2Norm,
6975     };
6976
6977     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6978                                               inputValues, outScale, outOffset, expectedOutputValues, layout,
6979                                               epsilon);
6980 }
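// Worked numbers for the epsilon test above, assuming the implementation
// clamps the squared sum at epsilon (i.e. invNorm = 1 / sqrt(max(sum, eps))):
// the squared sum is (1e-8)^2 + (2e-8)^2 + (3e-8)^2 = 1.4e-15, which is below
// both epsilon values exercised (the 1e-12 default and 1e-9), so every
// expected output reduces to input * 1 / sqrt(epsilon).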
6981
6982
6983 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6984 LayerTestResult<T, 4> L2Normalization1dTestCommon(
6985         armnn::IWorkloadFactory& workloadFactory,
6986         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6987         float scale,
6988         int32_t offset,
6989         float outScale,
6990         int32_t outOffset,
6991         const armnn::DataLayout layout)
6992 {
6993     // Width: 1
6994     // Height: 1
6995     // Channels: 10
6996     // BatchSize: 1
6997     unsigned int numberOfBatches = 1;
6998     unsigned int numberOfChannels = 10;
6999     unsigned int height = 1;
7000     unsigned int width = 1;
7001
7002
7003     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7004             numberOfBatches, numberOfChannels, height, width, layout);
7005     std::vector<float> inputValues
7006     {
7007         // Batch 0, Channel 0, Height (1) x Width (1)
7008         1.0f,
7009
7010         // Batch 0, Channel 1, Height (1) x Width (1)
7011         2.0f,
7012
7013         // Batch 0, Channel 2, Height (1) x Width (1)
7014         3.0f,
7015
7016         // Batch 0, Channel 3, Height (1) x Width (1)
7017         4.0f,
7018
7019         // Batch 0, Channel 4, Height (1) x Width (1)
7020         5.0f,
7021
7022         // Batch 0, Channel 5, Height (1) x Width (1)
7023         6.0f,
7024
7025         // Batch 0, Channel 6, Height (1) x Width (1)
7026         7.0f,
7027
7028         // Batch 0, Channel 7, Height (1) x Width (1)
7029         8.0f,
7030
7031         // Batch 0, Channel 8, Height (1) x Width (1)
7032         9.0f,
7033
7034         // Batch 0, Channel 9, Height (1) x Width (1)
7035         10.0f
7036     };
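    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719.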
7037     const float approxInvL2Norm = 0.050964719f;
7038     std::vector<float> expectedOutputValues
7039     {
7040         // Batch 0, Channel 0, Height (1) x Width (1)
7041         1.0f * approxInvL2Norm,
7042         2.0f * approxInvL2Norm,
7043         3.0f * approxInvL2Norm,
7044         4.0f * approxInvL2Norm,
7045         5.0f * approxInvL2Norm,
7046         6.0f * approxInvL2Norm,
7047         7.0f * approxInvL2Norm,
7048         8.0f * approxInvL2Norm,
7049         9.0f * approxInvL2Norm,
7050         10.0f * approxInvL2Norm
7051     };
7052
7053
7054     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7055                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7056 }
7057
7058 LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
7059         armnn::IWorkloadFactory& workloadFactory,
7060         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7061         const armnn::DataLayout layout)
7062 {
7063     // Dummy descriptor to get the default value of epsilon (1e-12f).
7064     armnn::L2NormalizationDescriptor descriptor;
7065
7066     return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7067                                                                       layout, descriptor.m_Eps);
7068 }
7069
7070 LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
7071         armnn::IWorkloadFactory& workloadFactory,
7072         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7073         const armnn::DataLayout layout)
7074 {
7075     return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7076                                                                       layout, 1e-9f);
7077 }
7078
7079 LayerTestResult<float, 4> L2Normalization1dTest(
7080     armnn::IWorkloadFactory& workloadFactory,
7081     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7082     const armnn::DataLayout layout)
7083 {
7084     return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0, layout);
7085 }
7086
7087 LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
7088     armnn::IWorkloadFactory& workloadFactory,
7089     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7090     const armnn::DataLayout layout)
7091 {
7092     return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7093                                                                          layout);
7094 }
7095
7096 LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
7097     armnn::IWorkloadFactory& workloadFactory,
7098     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7099     const armnn::DataLayout layout)
7100 {
7101     return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7102                                                                          1.f/128, 128, layout);
7103 }
7104
7105 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7106 LayerTestResult<T, 4> L2Normalization2dTestCommon(
7107     armnn::IWorkloadFactory& workloadFactory,
7108     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7109     float scale,
7110     int32_t offset,
7111     float outScale,
7112     int32_t outOffset,
7113     const armnn::DataLayout layout)
7114 {
7115     // Width: 5
7116     // Height: 1
7117     // Channels: 2
7118     // BatchSize: 1
7119     unsigned int numberOfBatches = 1;
7120     unsigned int numberOfChannels = 2;
7121     unsigned int height = 1;
7122     unsigned int width = 5;
7123
7124     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7125             numberOfBatches, numberOfChannels, height, width, layout);
7126     std::vector<float> inputValues
7127     {
7128         // Batch 0, Channel 0, Height (1) x Width (5)
7129         1.0f, 3.0f, 5.0f, 7.0f,  9.0f,
7130
7131         // Batch 0, Channel 1, Height (1) x Width (5)
7132         2.0f, 4.0f, 6.0f, 8.0f, 10.0f
7133     };
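    // Normalization runs across the channel dimension: each element is scaled by the
    // inverse L2 norm of the values that share its (height, width) position. The
    // CalcInvL2Norm helper used throughout these tests returns 1 / sqrt(sum of squares);
    // e.g. for the first column, 1 / sqrt(1^2 + 2^2) ~= 0.4472, so the expected outputs
    // are 1.0f * 0.4472 ~= 0.4472 and 2.0f * 0.4472 ~= 0.8944.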
7134     std::vector<float> expectedOutputValues
7135     {
7136         // Batch 0, Channel 0, Height (1) x Width (5)
7137          1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7138          3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7139          5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7140          7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7141          9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
7142
7143         // Batch 0, Channel 1, Height (1) x Width (5)
7144          2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7145          4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7146          6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7147          8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7148         10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
7149     };
7150
7151     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7152                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7153 }
7154
7155 LayerTestResult<float, 4> L2Normalization2dTest(
7156     armnn::IWorkloadFactory& workloadFactory,
7157     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7158     const armnn::DataLayout layout)
7159 {
7160     return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7161                                                                  layout);
7162 }
7163
7164 LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
7165     armnn::IWorkloadFactory& workloadFactory,
7166     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7167     const armnn::DataLayout layout)
7168 {
7169     return L2Normalization2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7170                                                                          layout);
7171 }
7172
7173 LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
7174     armnn::IWorkloadFactory& workloadFactory,
7175     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7176     const armnn::DataLayout layout)
7177 {
7178     return L2Normalization2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7179                                                                          1.f/128, 128, layout);
7180 }
7181
7182 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7183 LayerTestResult<T, 4> L2Normalization3dTestCommon(
7184     armnn::IWorkloadFactory& workloadFactory,
7185     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7186     float scale,
7187     int32_t offset,
7188     float outScale,
7189     int32_t outOffset,
7190     const armnn::DataLayout layout)
7191 {
7192     // Width: 3
7193     // Height: 4
7194     // Channels: 2
7195     // BatchSize: 1
7196     unsigned int numberOfBatches = 1;
7197     unsigned int numberOfChannels = 2;
7198     unsigned int height = 4;
7199     unsigned int width = 3;
7200
7201     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7202             numberOfBatches, numberOfChannels, height, width, layout);
7203     std::vector<float> inputValues
7204     {
7205         // Batch 0, Channel 0, Height (4) x Width (3)
7206         119.0f,  21.0f, 150.0f,
7207         149.0f,  32.0f, 179.0f,
7208          15.0f, 227.0f, 141.0f,
7209         147.0f, 199.0f, 220.0f,
7210
7211         // Batch 0, Channel 1, Height (4) x Width (3)
7212         110.0f, 140.0f,  73.0f,
7213         211.0f, 212.0f,  89.0f,
7214          24.0f, 138.0f, 188.0f,
7215         162.0f,  12.0f, 161.0f
7216     };
7217     std::vector<float> expectedOutputValues
7218     {
7219         // Batch 0, Channel 0, Height (4) x Width (3)
7220         119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
7221          21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
7222         150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
7223         149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
7224          32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
7225         179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
7226          15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
7227         227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
7228         141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
7229         147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
7230         199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
7231         220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
7232
7233         // Batch 0, Channel 1, Height (4) x Width (3)
7234         110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
7235         140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
7236          73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
7237         211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
7238         212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
7239          89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
7240          24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
7241         138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
7242         188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
7243         162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
7244          12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
7245         161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
7246     };
7247
7248     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7249                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7250 }
7251
7252 LayerTestResult<float, 4> L2Normalization3dTest(
7253     armnn::IWorkloadFactory& workloadFactory,
7254     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7255     const armnn::DataLayout layout)
7256 {
7257     return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7258                                                                  layout);
7259 }
7260
7261 LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
7262     armnn::IWorkloadFactory& workloadFactory,
7263     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7264     const armnn::DataLayout layout)
7265 {
7266     return L2Normalization3dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7267                                                                          layout);
7268 }
7269
7270 LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
7271     armnn::IWorkloadFactory& workloadFactory,
7272     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7273     const armnn::DataLayout layout)
7274 {
7275     return L2Normalization3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7276                                                                          1.f/128, 128, layout);
7277 }
7278
7279 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7280 LayerTestResult<T, 4> L2Normalization4dTestCommon(
7281     armnn::IWorkloadFactory& workloadFactory,
7282     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7283     float scale,
7284     int32_t offset,
7285     float outScale,
7286     int32_t outOffset,
7287     const armnn::DataLayout layout)
7288 {
7289     // Width: 3
7290     // Height: 4
7291     // Channels: 3
7292     // BatchSize: 2
7293     unsigned int numberOfBatches = 2;
7294     unsigned int numberOfChannels = 3;
7295     unsigned int height = 4;
7296     unsigned int width = 3;
7297
7298     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7299             numberOfBatches, numberOfChannels, height, width, layout);
7300     std::vector<float> inputValues
7301     {
7302         // Batch 0, Channel 0, Height (4) x Width (3)
7303         235.0f,  46.0f, 178.0f,
7304         100.0f, 123.0f,  19.0f,
7305         172.0f,  74.0f, 250.0f,
7306           6.0f, 195.0f,  80.0f,
7307
7308         // Batch 0, Channel 1, Height (4) x Width (3)
7309         113.0f,  95.0f, 202.0f,
7310          77.0f, 114.0f,  71.0f,
7311         122.0f, 246.0f, 166.0f,
7312          82.0f,  28.0f,  37.0f,
7313
7314         // Batch 0, Channel 2, Height (4) x Width (3)
7315          56.0f, 170.0f, 162.0f,
7316         194.0f,  89.0f, 254.0f,
7317          12.0f, 209.0f, 200.0f,
7318           1.0f,  64.0f,  54.0f,
7319
7320         // Batch 1, Channel 0, Height (4) x Width (3)
7321          67.0f,  90.0f,  49.0f,
7322           7.0f, 163.0f,  18.0f,
7323          25.0f, 117.0f, 103.0f,
7324         247.0f,  59.0f, 189.0f,
7325
7326         // Batch 1, Channel 1, Height (4) x Width (3)
7327         239.0f, 104.0f, 199.0f,
7328          17.0f, 124.0f, 153.0f,
7329         222.0f, 217.0f,  75.0f,
7330          32.0f, 126.0f,  21.0f,
7331
7332         // Batch 1, Channel 2, Height (4) x Width (3)
7333          97.0f, 145.0f, 215.0f,
7334         115.0f, 116.0f, 238.0f,
7335         226.0f,  16.0f, 132.0f,
7336          92.0f, 125.0f,  88.0f
7337     };
7338     std::vector<float> expectedOutputValues
7339     {
7340         // Batch 0, Channel 0, Height (4) x Width (3)
7341         235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
7342          46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
7343         178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
7344         100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
7345         123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
7346          19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
7347         172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
7348          74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
7349         250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
7350           6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
7351         195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
7352          80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
7353
7354         // Batch 0, Channel 1, Height (4) x Width (3)
7355         113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
7356          95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
7357         202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
7358          77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
7359         114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
7360          71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
7361         122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
7362         246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
7363         166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
7364          82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
7365          28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
7366          37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
7367
7368         // Batch 0, Channel 2, Height (4) x Width (3)
7369          56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
7370         170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
7371         162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
7372         194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
7373          89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
7374         254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
7375          12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
7376         209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
7377         200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
7378           1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
7379          64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
7380          54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
7381
7382         // Batch 1, Channel 0, Height (4) x Width (3)
7383          67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
7384          90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
7385          49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
7386           7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
7387         163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7388          18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
7389          25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
7390         117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
7391         103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
7392         247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
7393          59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
7394         189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
7395
7396         // Batch 1, Channel 1, Height (4) x Width (3)
7397         239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
7398         104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
7399         199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
7400          17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
7401         124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7402         153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
7403         222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
7404         217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
7405          75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
7406          32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
7407         126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
7408          21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
7409
7410         // Batch 1, Channel 2, Height (4) x Width (3)
7411          97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
7412         145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
7413         215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
7414         115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
7415         116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7416         238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
7417         226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
7418          16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
7419         132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
7420          92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
7421         125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
7422          88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
7423     };
7424
7425     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7426                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7427 }
7428
7429 LayerTestResult<float, 4> L2Normalization4dTest(
7430     armnn::IWorkloadFactory& workloadFactory,
7431     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7432     const armnn::DataLayout layout)
7433 {
7434     return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7435                                                                  layout);
7436 }
7437
7438 LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7439     armnn::IWorkloadFactory& workloadFactory,
7440     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7441     const armnn::DataLayout layout)
7442 {
7443     return L2Normalization4dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7444                                                                          layout);
7445 }
7446
7447 LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7448     armnn::IWorkloadFactory& workloadFactory,
7449     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7450     const armnn::DataLayout layout)
7451 {
7452     return L2Normalization4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7453                                                                          1.f/128, 128, layout);
7454 }
7455
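// The Constant workload takes no inputs: the tensor to be produced is supplied up
// front through ConstantQueueDescriptor::m_LayerOutput. The test below therefore
// expects the workload to reproduce, unchanged, the tensor it was created with.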
7456 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7457 LayerTestResult<T, 4> ConstantTestImpl(
7458     armnn::IWorkloadFactory& workloadFactory,
7459     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7460     float qScale,
7461     int32_t qOffset)
7462 {
7463     constexpr unsigned int inputWidth = 3;
7464     constexpr unsigned int inputHeight = 4;
7465     constexpr unsigned int inputChannels = 3;
7466     constexpr unsigned int inputBatchSize = 2;
7467
7468     constexpr unsigned int outputWidth = inputWidth;
7469     constexpr unsigned int outputHeight = inputHeight;
7470     constexpr unsigned int outputChannels = inputChannels;
7471     constexpr unsigned int outputBatchSize = inputBatchSize;
7472
7473     armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7474                                         ArmnnType, qScale, qOffset);
7475
7476     armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7477                                          ArmnnType, qScale, qOffset);
7478
7479     // Set quantization parameters if the requested type is a quantized type.
7480     if(armnn::IsQuantizedType<T>())
7481     {
7482         inputTensorInfo.SetQuantizationScale(qScale);
7483         inputTensorInfo.SetQuantizationOffset(qOffset);
7484         outputTensorInfo.SetQuantizationScale(qScale);
7485         outputTensorInfo.SetQuantizationOffset(qOffset);
7486     }
7487
7488     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
7489         QuantizedVector<T>(qScale, qOffset, {
7490         // Batch 0, Channel 0
7491         235.0f,  46.0f, 178.0f,
7492         100.0f, 123.0f,  19.0f,
7493         172.0f,  74.0f, 250.0f,
7494           6.0f, 195.0f,  80.0f,
7495
7496         // Batch 0, Channel 1
7497         113.0f,  95.0f, 202.0f,
7498          77.0f, 114.0f,  71.0f,
7499         122.0f, 246.0f, 166.0f,
7500          82.0f,  28.0f,  37.0f,
7501
7502         // Batch 0, Channel 2
7503          56.0f, 170.0f, 162.0f,
7504         194.0f,  89.0f, 254.0f,
7505          12.0f, 209.0f, 200.0f,
7506           1.0f,  64.0f,  54.0f,
7507
7508         // Batch 1, Channel 0
7509          67.0f,  90.0f,  49.0f,
7510           7.0f, 163.0f,  18.0f,
7511          25.0f, 117.0f, 103.0f,
7512         247.0f,  59.0f, 189.0f,
7513
7514         // Batch 1, Channel 1
7515         239.0f, 104.0f, 199.0f,
7516          17.0f, 124.0f, 153.0f,
7517         222.0f, 217.0f,  75.0f,
7518          32.0f, 126.0f,  21.0f,
7519
7520         // Batch 1, Channel 2
7521          97.0f, 145.0f, 215.0f,
7522         115.0f, 116.0f, 238.0f,
7523         226.0f,  16.0f, 132.0f,
7524          92.0f, 125.0f,  88.0f,
7525     })));
7526
7527     LayerTestResult<T, 4> result(outputTensorInfo);
7528     result.outputExpected = input;
7529
7530     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7531
7532     armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
7533     AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
7534
7535     armnn::ConstantQueueDescriptor descriptor;
7536     descriptor.m_LayerOutput = &constantTensor;
7537
7538     armnn::WorkloadInfo info;
7539     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7540
7541     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
7542
7543     outputHandle->Allocate();
7544
7545     workload->PostAllocationConfigure();
7546     workload->Execute();
7547
7548     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7549     return result;
7550 }
7551
7552 LayerTestResult<float, 4> ConstantTest(
7553     armnn::IWorkloadFactory& workloadFactory,
7554     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7555 {
7556     return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
7557 }
7558
7559 LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7560     armnn::IWorkloadFactory& workloadFactory,
7561     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7562 {
7563     return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7564 }
7565
7566 LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
7567     armnn::IWorkloadFactory& workloadFactory,
7568     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7569 {
7570     return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
7571 }
7572
7573 LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
7574         armnn::IWorkloadFactory& workloadFactory,
7575         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7576 {
7577     unsigned int outputWidth = 3;
7578     unsigned int outputHeight = 6;
7579     unsigned int outputChannels = 3;
7580
7581     unsigned int inputWidth1 = 3;
7582     unsigned int inputHeight1 = 6;
7583     unsigned int inputChannels1 = 2;
7584
7585     unsigned int inputWidth2 = 3;
7586     unsigned int inputHeight2 = 6;
7587     unsigned int inputChannels2 = 1;
7588
7589     // Defines the tensor descriptors.
7590     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7591     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7592     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7593
7594     // Quantized input1 tensor. Range [-3, 1]
7595     const float inputScale1 = 0.015686f;
7596     const int32_t inputOffset1 = 192;
7597
7598     auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7599     {
7600         1, 2, 3,
7601         4, 5, 6,
7602         7, 8, 9,
7603         10, 11, 12,
7604         13, 14, 15,
7605         16, 17, 18,
7606
7607         19, 20, 21,
7608         22, 23, 24,
7609         25, 26, 27,
7610         28, 29, 30,
7611         31, 32, 33,
7612         34, 35, 36,
7613     })
7614     );
7615
7616     // Quantized input2 tensor. Range [-1, 4]
7617     const float inputScale2 = 0.019608f;
7618     const int32_t inputOffset2 = 50;
7619
7620     auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7621     {
7622         37, 38, 39,
7623         40, 41, 42,
7624         43, 44, 45,
7625         46, 47, 48,
7626         49, 50, 51,
7627         52, 53, 54,
7628     })
7629     );
7630
7631     // Output has the same quantization parameters as input1,
7632     // so that only the requantization of input2 is required.
7633     const float outputScale = 0.015686f;
7634     const int32_t outputOffset = 192;
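    // Since the output reuses input1's quantization parameters, input1's values pass
    // through unchanged, while input2 must be requantized as
    // q' = (inputScale2 * (q - inputOffset2)) / outputScale + outputOffset.
    // For example, 37 dequantizes to 0.019608 * (37 - 50) ~= -0.255, which requantizes
    // to -0.255 / 0.015686 + 192 ~= 175.7 and is stored as 176 below.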
7635
7636     LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7637
7638     ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7639     {
7640         1, 2, 3,
7641         4, 5, 6,
7642         7, 8, 9,
7643         10, 11, 12,
7644         13, 14, 15,
7645         16, 17, 18,
7646
7647         19, 20, 21,
7648         22, 23, 24,
7649         25, 26, 27,
7650         28, 29, 30,
7651         31, 32, 33,
7652         34, 35, 36,
7653
7654         176, 177, 178,
7655         179, 181, 182,
7656         183, 184, 186,
7657         187, 188, 189,
7658         191, 192, 193,
7659         195, 196, 197,
7660     })
7661     );
7662
7663     outputTensorInfo.SetQuantizationScale(outputScale);
7664     outputTensorInfo.SetQuantizationOffset(outputOffset);
7665     inputTensorInfo1.SetQuantizationScale(inputScale1);
7666     inputTensorInfo1.SetQuantizationOffset(inputOffset1);
7667     inputTensorInfo2.SetQuantizationScale(inputScale2);
7668     inputTensorInfo2.SetQuantizationOffset(inputOffset2);
7669
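    // The two views tile the output along the channel axis: input1 (2 channels) is
    // written starting at channel 0, and input2 (1 channel) starting at channel 2,
    // filling the 3-channel output exactly.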
7670     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
7671     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
7672
7673     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
7674     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
7675
7676     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7677
7678     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7679
7680     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7681             subTensorsSupported ?
7682             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7683             workloadFactory.CreateTensorHandle(inputTensorInfo1);
7684
7685     std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7686             subTensorsSupported ?
7687             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7688             workloadFactory.CreateTensorHandle(inputTensorInfo2);
7689
7690     armnn::ConcatQueueDescriptor data;
7691     armnn::WorkloadInfo info;
7692     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7693     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7694     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7695
7696     data.m_ViewOrigins.push_back(window1);
7697     data.m_ViewOrigins.push_back(window2);
7698
7699     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
7700
7701     inputHandle1->Allocate();
7702     inputHandle2->Allocate();
7703     outputHandle->Allocate();
7704
7705     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7706     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7707
7708     workload->PostAllocationConfigure();
7709     workload->Execute();
7710
7711     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7712
7713     return ret;
7714 }
7715
7716 LayerTestResult<uint8_t, 3> ConcatUint8Test(
7717     armnn::IWorkloadFactory& workloadFactory,
7718     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7719 {
7720     unsigned int outputWidth = 3;
7721     unsigned int outputHeight = 6;
7722     unsigned int outputChannels = 3;
7723
7724     unsigned int inputWidth1 = 3;
7725     unsigned int inputHeight1 = 6;
7726     unsigned int inputChannels1 = 2;
7727
7728     unsigned int inputWidth2 = 3;
7729     unsigned int inputHeight2 = 6;
7730     unsigned int inputChannels2 = 1;
7731
7732     // Defines the tensor descriptors.
7733     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7734     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7735     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7736
7737     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
7738     const float scale = 0.13497836f;
7739     const int32_t offset = -7;
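    // Because every tensor shares the same scale and offset, the expected output is
    // simply the input values copied through in view order, with no requantization.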
7740
7741     outputTensorInfo.SetQuantizationScale(scale);
7742     outputTensorInfo.SetQuantizationOffset(offset);
7743     inputTensorInfo1.SetQuantizationScale(scale);
7744     inputTensorInfo1.SetQuantizationOffset(offset);
7745     inputTensorInfo2.SetQuantizationScale(scale);
7746     inputTensorInfo2.SetQuantizationOffset(offset);
7747
7748     LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7749
7750     ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7751         {
7752             1, 2, 3,
7753             4, 5, 6,
7754             7, 8, 9,
7755             10, 11, 12,
7756             13, 14, 15,
7757             16, 17, 18,
7758
7759             19, 20, 21,
7760             22, 23, 24,
7761             25, 26, 27,
7762             28, 29, 30,
7763             31, 32, 33,
7764             34, 35, 36,
7765
7766             37, 38, 39,
7767             40, 41, 42,
7768             43, 44, 45,
7769             46, 47, 48,
7770             49, 50, 51,
7771             52, 53, 54,
7772         })
7773     );
7774
7775     auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7776     {
7777         1, 2, 3,
7778         4, 5, 6,
7779         7, 8, 9,
7780         10, 11, 12,
7781         13, 14, 15,
7782         16, 17, 18,
7783
7784         19, 20, 21,
7785         22, 23, 24,
7786         25, 26, 27,
7787         28, 29, 30,
7788         31, 32, 33,
7789         34, 35, 36,
7790     })
7791     );
7792
7793     auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7794     {
7795         37, 38, 39,
7796         40, 41, 42,
7797         43, 44, 45,
7798         46, 47, 48,
7799         49, 50, 51,
7800         52, 53, 54,
7801     })
7802     );
7803
7804     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
7805     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
7806
7807     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
7808     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
7809
7810
7811     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7812
7813     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7814
7815     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7816         subTensorsSupported ?
7817             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7818             workloadFactory.CreateTensorHandle(inputTensorInfo1);
7819
7820     std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7821         subTensorsSupported ?
7822             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7823             workloadFactory.CreateTensorHandle(inputTensorInfo2);
7824
7825
7826     armnn::ConcatQueueDescriptor data;
7827     armnn::WorkloadInfo info;
7828     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7829     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7830     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7831
7832     data.m_ViewOrigins.push_back(window1);
7833     data.m_ViewOrigins.push_back(window2);
7834
7835     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
7836
7837     inputHandle1->Allocate();
7838     inputHandle2->Allocate();
7839     outputHandle->Allocate();
7840
7841     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7842     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7843
7844     workload->PostAllocationConfigure();
7845     workload->Execute();
7846
7847     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7848
7849     return ret;
7850 }
7851
7852 LayerTestResult<uint16_t, 3> ConcatUint16Test(
7853         armnn::IWorkloadFactory& workloadFactory,
7854         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7855 {
7856     unsigned int outputWidth = 3;
7857     unsigned int outputHeight = 6;
7858     unsigned int outputChannels = 3;
7859
7860     unsigned int inputWidth1 = 3;
7861     unsigned int inputHeight1 = 6;
7862     unsigned int inputChannels1 = 2;
7863
7864     unsigned int inputWidth2 = 3;
7865     unsigned int inputHeight2 = 6;
7866     unsigned int inputChannels2 = 1;
7867
7868     // Defines the tensor descriptors.
7869     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
7870     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
7871     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
7872
7873     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
7874     const float scale = 0.13497836f;
7875     const int32_t offset = -7;
7876
7877     outputTensorInfo.SetQuantizationScale(scale);
7878     outputTensorInfo.SetQuantizationOffset(offset);
7879     inputTensorInfo1.SetQuantizationScale(scale);
7880     inputTensorInfo1.SetQuantizationOffset(offset);
7881     inputTensorInfo2.SetQuantizationScale(scale);
7882     inputTensorInfo2.SetQuantizationOffset(offset);
7883
7884     LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
7885
7886     ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
7887     {
7888         1, 2, 3,
7889         4, 5, 6,
7890         7, 8, 9,
7891         10, 11, 12,
7892         13, 14, 15,
7893         16, 17, 18,
7894
7895         19, 20, 21,
7896         22, 23, 24,
7897         25, 26, 27,
7898         28, 29, 30,
7899         31, 32, 33,
7900         34, 35, 36,
7901
7902         37, 38, 39,
7903         40, 41, 42,
7904         43, 44, 45,
7905         46, 47, 48,
7906         49, 50, 51,
7907         52, 53, 54,
7908     }));
7909
7910     auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
7911     {
7912         1, 2, 3,
7913         4, 5, 6,
7914         7, 8, 9,
7915         10, 11, 12,
7916         13, 14, 15,
7917         16, 17, 18,
7918
7919         19, 20, 21,
7920         22, 23, 24,
7921         25, 26, 27,
7922         28, 29, 30,
7923         31, 32, 33,
7924         34, 35, 36,
7925     }));
7926
7927     auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
7928     {
7929         37, 38, 39,
7930         40, 41, 42,
7931         43, 44, 45,
7932         46, 47, 48,
7933         49, 50, 51,
7934         52, 53, 54,
7935     }));
7936
7937     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
7938     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
7939
7940     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
7941     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
7942
7943
7944     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7945
7946     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7947
7948     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7949             subTensorsSupported ?
7950             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7951             workloadFactory.CreateTensorHandle(inputTensorInfo1);
7952
7953     std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7954             subTensorsSupported ?
7955             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7956             workloadFactory.CreateTensorHandle(inputTensorInfo2);
7957
7958
7959     armnn::ConcatQueueDescriptor data;
7960     armnn::WorkloadInfo info;
7961     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7962     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7963     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7964
7965     data.m_ViewOrigins.push_back(window1);
7966     data.m_ViewOrigins.push_back(window2);
7967
7968     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
7969
7970     inputHandle1->Allocate();
7971     inputHandle2->Allocate();
7972     outputHandle->Allocate();
7973
7974     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7975     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7976
7977     workload->PostAllocationConfigure();
7978     workload->Execute();
7979
7980     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7981
7982     return ret;
7983 }
7984
7985 namespace
7986 {
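// Shared helper for the quantized addition tests below: it builds two input tensors
// with the given quantization parameters, runs an Addition workload, and compares
// the result against pre-computed quantized outputs.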
7987 template <typename T>
7988 LayerTestResult<T, 4> AdditionQuantizeTestHelper(
7989     armnn::IWorkloadFactory& workloadFactory,
7990     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7991     const unsigned int shape0[4],
7992     const std::vector<T>& values0,
7993     float scale0,
7994     int32_t offset0,
7995     const unsigned int shape1[4],
7996     const std::vector<T>& values1,
7997     float scale1,
7998     int32_t offset1,
7999     const unsigned int outShape[4],
8000     const std::vector<T>& outValues,
8001     float outScale,
8002     int32_t outOffset)
8003 {
8004     auto dataType = (std::is_same<T, uint8_t>::value ?
8005                      armnn::DataType::QuantisedAsymm8 :
8006                      armnn::DataType::QuantisedSymm16);
8007
8008     armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
8009     armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
8010     armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
8011
8012     inputTensorInfo0.SetQuantizationScale(scale0);
8013     inputTensorInfo0.SetQuantizationOffset(offset0);
8014
8015     inputTensorInfo1.SetQuantizationScale(scale1);
8016     inputTensorInfo1.SetQuantizationOffset(offset1);
8017
8018     outputTensorInfo.SetQuantizationScale(outScale);
8019     outputTensorInfo.SetQuantizationOffset(outOffset);
8020
8021     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8022     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8023
8024     LayerTestResult<T, 4> result(outputTensorInfo);
8025     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8026
8027     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8028     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8029     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8030
8031     armnn::AdditionQueueDescriptor data;
8032     armnn::WorkloadInfo info;
8033     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
8034     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
8035     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8036
8037     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
8038
8039     inputHandle0->Allocate();
8040     inputHandle1->Allocate();
8041     outputHandle->Allocate();
8042
8043     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8044     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8045
8046     workload->PostAllocationConfigure();
8047     workload->Execute();
8048
8049     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8050
8051     return result;
8052 }
8053 } // anonymous namespace
8054
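// The quantized addition tests encode real = scale * (quantized - offset). For
// AdditionUint8Test (scale 7, offset 3): input0[0] = 63 -> 7 * (63 - 3) = 420 and
// input1[0] = 21 -> 7 * (21 - 3) = 126; the sum 546 requantizes to 546 / 7 + 3 = 81,
// the first expected output value. Sums whose requantized value would exceed 255
// clamp, as flagged in the inline comments.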
8055 LayerTestResult<uint8_t, 4> AdditionUint8Test(
8056     armnn::IWorkloadFactory& workloadFactory,
8057     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8058 {
8059     const unsigned int shape0[] = { 1, 2, 2, 3 };
8060     const unsigned int shape1[] = { 1, 2, 2, 3 };
8061
8062     std::vector<uint8_t> input0(
8063     {
8064         63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
8065         203,  28, 252, 168, 245,  91  // 1400, 175, 1743, 1155, 1694, 616
8066     });
8067
8068     std::vector<uint8_t> input1(
8069     {
8070         21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
8071         126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
8072     });
8073
8074     std::vector<uint8_t> output(
8075     {
8076         81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
8077         255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
8078     });
8079
8080     return AdditionQuantizeTestHelper(workloadFactory,
8081                                       memoryManager,
8082                                       shape0, input0, 7.0f, 3,
8083                                       shape1, input1, 7.0f, 3,
8084                                       shape0, output, 7.0f, 3);
8085 }
8086
8087 LayerTestResult<int16_t, 4> AdditionInt16Test(
8088     armnn::IWorkloadFactory& workloadFactory,
8089     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8090 {
8091     const unsigned int shape0[] = { 1, 2, 2, 3 };
8092     const unsigned int shape1[] = { 1, 2, 2, 3 };
8093
8094     std::vector<int16_t> input0(
8095         {
8096             63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392, 784
8097             203,  28, 252, 168, 245,  91  // 1421, 196, 1764, 1176, 1715, 637
8098         });
8099
8100     std::vector<int16_t> input1(
8101         {
8102             21,   7, 175, 231, 175, 210, // 147,   49, 1225, 1617, 1225, 1470
8103             126, 161,  63,  21, 105, 126  // 882, 1127,  441,  147,  735,  882
8104         });
8105
8106     std::vector<int16_t> output(
8107         {
8108             84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
8109             329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519 (none clamp: int16 max is 32767)
8110         });
8111
8112     return AdditionQuantizeTestHelper(workloadFactory,
8113                                       memoryManager,
8114                                       shape0, input0, 7.0f, 0,
8115                                       shape1, input1, 7.0f, 0,
8116                                       shape0, output, 7.0f, 0);
8117 }
8118
8119 namespace
8120 {
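// Shared helper for the quantized multiplication tests below. It mirrors
// AdditionQuantizeTestHelper, except that the armnn::DataType is passed directly
// as a template parameter rather than deduced from T.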
8121 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
8122 LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
8123     armnn::IWorkloadFactory& workloadFactory,
8124     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8125     const unsigned int shape0[4],
8126     const std::vector<T> & values0,
8127     float scale0,
8128     int32_t offset0,
8129     const unsigned int shape1[4],
8130     const std::vector<T> & values1,
8131     float scale1,
8132     int32_t offset1,
8133     const unsigned int outShape[4],
8134     const std::vector<T> & outValues,
8135     float outScale,
8136     int32_t outOffset)
8137 {
8138     armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
8139     armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
8140     armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
8141
8142     inputTensorInfo0.SetQuantizationScale(scale0);
8143     inputTensorInfo0.SetQuantizationOffset(offset0);
8144
8145     inputTensorInfo1.SetQuantizationScale(scale1);
8146     inputTensorInfo1.SetQuantizationOffset(offset1);
8147
8148     outputTensorInfo.SetQuantizationScale(outScale);
8149     outputTensorInfo.SetQuantizationOffset(outOffset);
8150
8151     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8152     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8153
8154     LayerTestResult<T, 4> result(outputTensorInfo);
8155     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8156
8157     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8158     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8159     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8160
8161     armnn::MultiplicationQueueDescriptor data;
8162     armnn::WorkloadInfo info;
8163     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
8164     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
8165     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8166
8167     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
8168
8169     inputHandle0->Allocate();
8170     inputHandle1->Allocate();
8171     outputHandle->Allocate();
8172
8173     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8174     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8175
8176     workload->PostAllocationConfigure();
8177     workload->Execute();
8178
8179     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8180
8181     return result;
8182 }
8183 } // anonymous namespace
8184
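// Quantized multiplication follows the same dequantize/requantize scheme as addition.
// For MultiplicationUint8Test: input0[0] = 62 -> 4 * (62 - 1) = 244 and
// input1[0] = 126 -> 3 * (126 - (-2)) = 384; the product 244 * 384 = 93696
// requantizes to 93696 / 1366.255 + (-5) ~= 63.6, stored as 64.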
8185 LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
8186     armnn::IWorkloadFactory& workloadFactory,
8187     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8188 {
8189     unsigned int batchSize = 1;
8190     unsigned int channels = 2;
8191     unsigned int height = 2;
8192     unsigned int width = 3;
8193     const unsigned int shape[] = { batchSize, channels, height, width };
8194
8195     // See dequantized values to the right.
8196     std::vector<uint8_t> input0({
8197          62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
8198         188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
8199     });
8200
8201     // See dequantized values to the right.
8202     std::vector<uint8_t> input1({
8203         126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
8204          48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
8205     });
8206
8207     // See dequantized values to the right.
8208     std::vector<uint8_t> output(
8209     {
8210          64,  72,   0, 255,   8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
8211          77,  15,  92,  16,  10,  21, // 112200,  26676,        132192,           29160, 21120,  35640
8212     });
8213
8214     // Scale/offset chosen to have output values out of range.
8215     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8216                                                                               memoryManager,
8217                                                                               shape,
8218                                                                               input0,
8219                                                                               4.0f,
8220                                                                               1,
8221                                                                               shape,
8222                                                                               input1,
8223                                                                               3.0f,
8224                                                                               -2,
8225                                                                               shape,
8226                                                                               output,
8227                                                                               1366.255f,
8228                                                                               -5);
8229 }
8230
8231 LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
8232     armnn::IWorkloadFactory& workloadFactory,
8233     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8234 {
8235     const unsigned int shape0[] = { 1, 2, 2, 3 };
8236     const unsigned int shape1[] = { 1, 1, 1, 1 };
8237
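    // input1 holds a single value that is broadcast against every element of input0;
    // with scale 1.0f and offset 0 the quantized values multiply through directly.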
8238     std::vector<uint8_t> input0({
8239         1, 2, 3,    4,  5,  6,
8240         7, 8, 9,   10, 11, 12
8241     });
8242
8243     std::vector<uint8_t> input1({2});
8244
8245     std::vector<uint8_t> output({
8246         2,  4,   6,     8, 10, 12,
8247         14, 16, 18,    20, 22, 24
8248     });
8249
8250     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8251                                                                               memoryManager,
8252                                                                               shape0,
8253                                                                               input0,
8254                                                                               1.0f,
8255                                                                               0,
8256                                                                               shape1,
8257                                                                               input1,
8258                                                                               1.0f,
8259                                                                               0,
8260                                                                               shape0,
8261                                                                               output,
8262                                                                               1.0f,
8263                                                                               0);
8264 }
8265
8266 LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
8267     armnn::IWorkloadFactory& workloadFactory,
8268     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8269 {
8270     const unsigned int shape0[] = { 1, 2, 2, 3 };
8271     const unsigned int shape1[] = { 1, 1, 1, 3 };
8272
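    // input1 is broadcast along the innermost (width) dimension, so each element of
    // input0 is multiplied by the input1 element sharing its width index.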
8273     std::vector<uint8_t> input0({
8274         1, 2, 3,    4,  5,  6,
8275         7, 8, 9,   10, 11, 12
8276     });
8277
8278     std::vector<uint8_t> input1({1, 2, 3});
8279
8280     std::vector<uint8_t> output({
8281         1,  4,   9,     4, 10, 18,
8282         7, 16,  27,    10, 22, 36
8283     });
8284
8285     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8286                                                                               memoryManager,
8287                                                                               shape0,
8288                                                                               input0,
8289                                                                               1.0f,
8290                                                                               0,
8291                                                                               shape1,
8292                                                                               input1,
8293                                                                               1.0f,
8294                                                                               0,
8295                                                                               shape0,
8296                                                                               output,
8297                                                                               1.0f,
8298                                                                               0);
8299 }
8300
8301 LayerTestResult<int16_t, 4> MultiplicationInt16Test(
8302     armnn::IWorkloadFactory& workloadFactory,
8303     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8304 {
8305     const unsigned int shape[] = { 1, 2, 2, 3 };
8306
8307     std::vector<int16_t> input0(
8308     {
8309         6,   7,  8,  9, 10, 11,
8310         12, 13, 14, 15, 16, 17
8311     });
8312
8313     std::vector<int16_t> input1(
8314     {
8315         1, 2, 3,  4,  5,  6,
8316         7, 8, 9, 10, 11, 12
8317     });
8318
8319     std::vector<int16_t> output(
8320     {
8321         6,   14,  24,  36,  50,  66,
8322         84, 104, 126, 150, 176, 204
8323     });
8324
8325     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8326                                                                               memoryManager,
8327                                                                               shape,
8328                                                                               input0,
8329                                                                               1.0f,
8330                                                                               0,
8331                                                                               shape,
8332                                                                               input1,
8333                                                                               1.0f,
8334                                                                               0,
8335                                                                               shape,
8336                                                                               output,
8337                                                                               1.0f,
8338                                                                               0);
8339 }
8340
8341 LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
8342     armnn::IWorkloadFactory& workloadFactory,
8343     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8344 {
8345     const unsigned int shape0[] = { 1, 2, 2, 3 };
8346     const unsigned int shape1[] = { 1, 1, 1, 1 };
8347
8348     std::vector<int16_t> input0(
8349     {
8350         1, 2, 3,  4,  5,  6,
8351         7, 8, 9, 10, 11, 12
8352     });
8353
8354     std::vector<int16_t> input1({2});
8355
8356     std::vector<int16_t> output(
8357     {
8358         2,   4,  6,  8, 10, 12,
8359         14, 16, 18, 20, 22, 24
8360     });
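    // A { 1, 1, 1, 1 } tensor broadcasts against every element of input0, so the whole
    // input is simply scaled by 2.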
8361
8362     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8363                                                                               memoryManager,
8364                                                                               shape0,
8365                                                                               input0,
8366                                                                               1.0f,
8367                                                                               0,
8368                                                                               shape1,
8369                                                                               input1,
8370                                                                               1.0f,
8371                                                                               0,
8372                                                                               shape0,
8373                                                                               output,
8374                                                                               1.0f,
8375                                                                               0);
8376 }
8377
8378 LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
8379     armnn::IWorkloadFactory& workloadFactory,
8380     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8381 {
8382     const unsigned int shape0[] = { 1, 2, 2, 3 };
8383     const unsigned int shape1[] = { 1, 1, 1, 3 };
8384
8385     std::vector<int16_t> input0(
8386     {
8387         1, 2, 3,  4,  5,  6,
8388         7, 8, 9, 10, 11, 12
8389     });
8390
8391     std::vector<int16_t> input1({1, 2, 3});
8392
8393     std::vector<int16_t> output(
8394     {
8395         1,  4,  9,  4, 10, 18,
8396         7, 16, 27, 10, 22, 36
8397     });
8398
8399     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8400                                                                               memoryManager,
8401                                                                               shape0,
8402                                                                               input0,
8403                                                                               1.0f,
8404                                                                               0,
8405                                                                               shape1,
8406                                                                               input1,
8407                                                                               1.0f,
8408                                                                               0,
8409                                                                               shape0,
8410                                                                               output,
8411                                                                               1.0f,
8412                                                                               0);
8413 }
8414
8415 namespace
8416 {
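// Shared helper for the Subtraction*Test cases below. It follows the workload-test
// pattern used throughout this file: build TensorInfos carrying per-tensor quantization
// parameters, wrap the inputs and output in ITensorHandles, run a Subtraction workload,
// and record the expected output for the caller's framework to compare against.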
8417 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
8418 LayerTestResult<T, 4> SubtractionTestHelper(
8419     armnn::IWorkloadFactory& workloadFactory,
8420     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8421     const unsigned int shape0[4],
8422     const std::vector<T>& values0,
8423     float scale0,
8424     int32_t offset0,
8425     const unsigned int shape1[4],
8426     const std::vector<T>& values1,
8427     float scale1,
8428     int32_t offset1,
8429     const unsigned int outShape[4],
8430     const std::vector<T>& outValues,
8431     float outScale,
8432     int32_t outOffset)
8433 {
8434     armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
8435     armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
8436     armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
8437
8438     inputTensorInfo0.SetQuantizationScale(scale0);
8439     inputTensorInfo0.SetQuantizationOffset(offset0);
8440
8441     inputTensorInfo1.SetQuantizationScale(scale1);
8442     inputTensorInfo1.SetQuantizationOffset(offset1);
8443
8444     outputTensorInfo.SetQuantizationScale(outScale);
8445     outputTensorInfo.SetQuantizationOffset(outOffset);
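    // ArmNN's linear quantization maps a stored value q to the real value
    // scale * (q - offset). For example, with scale0 = 0.5f and offset0 = 2 (as in
    // SubtractionUint8Test below), a stored 10 represents 0.5f * (10 - 2) = 4.0f.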
8446
8447     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8448     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8449
8450     LayerTestResult<T, 4> result(outputTensorInfo);
8451     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8452
8453     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8454     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8455     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8456
8457     armnn::SubtractionQueueDescriptor data;
8458     armnn::WorkloadInfo info;
8459     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
8460     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
8461     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8462
8463     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
8464
8465     inputHandle0->Allocate();
8466     inputHandle1->Allocate();
8467     outputHandle->Allocate();
8468
8469     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8470     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8471
8472     workload->PostAllocationConfigure();
8473     workload->Execute();
8474
8475     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8476
8477     return result;
8478 }
8479 } // anonymous namespace
8480
8481 LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8482     armnn::IWorkloadFactory& workloadFactory,
8483     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8484 {
8485     const unsigned int shape0[] = { 1, 1, 2, 2 };
8486     const unsigned int shape1[] = { 1, 1, 2, 2 };
8487
8488     std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8489     std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8490     std::vector<uint8_t> output({ 3, 3, 5, 5 });
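    // Worked example: input0 dequantizes (scale 0.5, offset 2) to { 4, 5, 6, 7 };
    // subtracting input1 = { 1, 2, 1, 2 } gives { 3, 3, 5, 5 }, and with an output
    // scale of 1 and offset of 0 the stored bytes equal the real results.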
8491
8492     return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8493                                                                    memoryManager,
8494                                                                    shape0, input0, 0.5f, 2,
8495                                                                    shape1, input1, 1.0f, 0,
8496                                                                    shape0, output, 1.0f, 0);
8497 }
8498
8499 LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8500     armnn::IWorkloadFactory& workloadFactory,
8501     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8502 {
8503     const unsigned int shape0[] = { 1, 1, 2, 2 };
8504     const unsigned int shape1[] = { 1, 1, 1, 1 };
8505
8506     std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8507     std::vector<uint8_t> input1({ 2 });
8508     std::vector<uint8_t> output({ 5, 6, 7, 8 });
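    // Here input0 dequantizes to { 4, 5, 6, 7 }; subtracting the broadcast scalar 2
    // gives { 2, 3, 4, 5 }, which re-quantizes with the output offset of 3 to
    // { 5, 6, 7, 8 }.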
8509
8510     return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8511                                                                    memoryManager,
8512                                                                    shape0, input0, 0.5f, 2,
8513                                                                    shape1, input1, 1.0f, 0,
8514                                                                    shape0, output, 1.0f, 3);
8515 }
8516
8517 LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
8518     armnn::IWorkloadFactory& workloadFactory,
8519     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8520 {
8521     const unsigned int shape0[] = { 1, 1, 2, 2 };
8522     const unsigned int shape1[] = { 1, 1, 1, 2 };
8523
8524     std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8525     std::vector<uint8_t> input1({ 2, 1 });
8526     std::vector<uint8_t> output({ 8, 11, 12, 15 });
8527
8528     return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8529                                                                    memoryManager,
8530                                                                    shape0, input0, 1.0f, 0,
8531                                                                    shape1, input1, 1.0f, 0,
8532                                                                    shape0, output, 1.0f, 0);
8533 }
8534
8535 LayerTestResult<float, 4> SubtractionTest(
8536     armnn::IWorkloadFactory& workloadFactory,
8537     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8538 {
8539     const unsigned int shape0[] = { 1, 1, 2, 2 };
8540     const unsigned int shape1[] = { 1, 1, 2, 2 };
8541
8542     std::vector<float> input0({ 1,  2, 3, 4 });
8543     std::vector<float> input1({ 1, -1, 0, 2 });
8544     std::vector<float> output({ 0,  3, 3, 2 });
8545
8546     return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8547                                                            memoryManager,
8548                                                            shape0, input0, 1.0f, 0,
8549                                                            shape1, input1, 1.0f, 0,
8550                                                            shape0, output, 1.0f, 0);
8551 }
8552
8553 LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8554     armnn::IWorkloadFactory& workloadFactory,
8555     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8556 {
8557     const unsigned int shape0[] = { 1, 1, 2, 2 };
8558     const unsigned int shape1[] = { 1, 1, 1, 1 };
8559
8560     std::vector<float> input0({ 1,  2, 3, 4 });
8561     std::vector<float> input1({ 10 });
8562     std::vector<float> output({ -9,  -8, -7, -6 });
8563
8564     return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8565                                                            memoryManager,
8566                                                            shape0, input0, 1.0f, 0,
8567                                                            shape1, input1, 1.0f, 0,
8568                                                            shape0, output, 1.0f, 0);
8569 }
8570
8571 LayerTestResult<float, 4> SubtractionBroadcastTest(
8572     armnn::IWorkloadFactory& workloadFactory,
8573     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8574 {
8575     const unsigned int shape0[] = { 1, 1, 2, 2 };
8576     const unsigned int shape1[] = { 1, 1, 1, 2 };
8577
8578     std::vector<float> input0({ 1,  2, 3, 4 });
8579     std::vector<float> input1({ 10, -5 });
8580     std::vector<float> output({ -9,  7, -7, 9 });
8581
8582     return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8583                                                            memoryManager,
8584                                                            shape0, input0, 1.0f, 0,
8585                                                            shape1, input1, 1.0f, 0,
8586                                                            shape0, output, 1.0f, 0);
8587 }
8588
8589 LayerTestResult<int16_t, 4> SubtractionInt16Test(
8590     armnn::IWorkloadFactory& workloadFactory,
8591     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8592 {
8593     const unsigned int shape0[] = { 1, 1, 2, 2 };
8594     const unsigned int shape1[] = { 1, 1, 2, 2 };
8595
8596     std::vector<int16_t> input0({ 10, 12, 14, 16 });
8597     std::vector<int16_t> input1({ 1, 2, 1, 2 });
8598     std::vector<int16_t> output({ 4, 4, 6, 6 });
8599
8600     return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8601                                                                    memoryManager,
8602                                                                    shape0, input0, 0.5f, 0,
8603                                                                    shape1, input1, 1.0f, 0,
8604                                                                    shape0, output, 1.0f, 0);
8605 }
8606
8607 LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8608     armnn::IWorkloadFactory& workloadFactory,
8609     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8610 {
8611     const unsigned int shape0[] = { 1, 1, 2, 2 };
8612     const unsigned int shape1[] = { 1, 1, 1, 1 };
8613
8614     std::vector<int16_t> input0({ 10, 12, 14, 16 });
8615     std::vector<int16_t> input1({ 2 });
8616     std::vector<int16_t> output({ 3, 4, 5, 6 });
8617
8618     return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8619                                                                    memoryManager,
8620                                                                    shape0, input0, 0.5f, 0,
8621                                                                    shape1, input1, 1.0f, 0,
8622                                                                    shape0, output, 1.0f, 0);
8623 }
8624
8625 LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8626     armnn::IWorkloadFactory& workloadFactory,
8627     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8628 {
8629     const unsigned int shape0[] = { 1, 1, 2, 2 };
8630     const unsigned int shape1[] = { 1, 1, 1, 2 };
8631
8632     std::vector<int16_t> input0({ 10, 12, 14, 16 });
8633     std::vector<int16_t> input1({ 2, 1 });
8634     std::vector<int16_t> output({ 8, 11, 12, 15 });
8635
8636     return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8637                                                                    memoryManager,
8638                                                                    shape0, input0, 1.0f, 0,
8639                                                                    shape1, input1, 1.0f, 0,
8640                                                                    shape0, output, 1.0f, 0);
8641 }
8642
8643 LayerTestResult<float, 4> BatchNormTest(
8644     armnn::IWorkloadFactory& workloadFactory,
8645     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8646 {
8647     // BatchSize: 1
8648     // Channels: 2
8649     // Height: 3
8650     // Width: 2
8651
8652     const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8653     std::vector<float> inputValues
8654     {
8655         // Batch 0, Channel 0, Height (3) x Width (2)
8656          1.f, 4.f,
8657          4.f, 2.f,
8658          1.f, 6.f,
8659
8660         // Batch 0, Channel 1, Height (3) x Width (2)
8661          1.f, 1.f,
8662          4.f, 1.f,
8663         -2.f, 4.f
8664     };
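    // The expected values below follow the standard batch-norm formula
    //   y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
    // evaluated per channel with the mean/variance/beta/gamma constants that
    // BatchNormTestImpl supplies internally (they are defined in BatchNormTestImpl.hpp,
    // not in this file).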
8665     std::vector<float> expectedOutputValues
8666     {
8667         // Batch 0, Channel 0, Height (3) x Width (2)
8668         1.f, 4.f,
8669         4.f, 2.f,
8670         1.f, 6.f,
8671
8672         // Batch 0, Channel 1, Height (3) x Width (2)
8673         3.f, 3.f,
8674         4.f, 3.f,
8675         2.f, 4.f
8676     };
8677
8678     return BatchNormTestImpl<armnn::DataType::Float32>(
8679         workloadFactory, memoryManager,
8680         inputOutputShape, inputValues, expectedOutputValues,
8681         0.f, 0, armnn::DataLayout::NCHW);
8682 }
8683
8684 LayerTestResult<float, 4> BatchNormNhwcTest(
8685     armnn::IWorkloadFactory& workloadFactory,
8686     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8687 {
8688     // BatchSize: 1
8689     // Height: 3
8690     // Width: 2
8691     // Channels: 2
8692
8693     const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8694     std::vector<float> inputValues
8695     {
8696         // Batch 0, Height 0, Width (2) x Channel (2)
8697         1.f,  1.f,
8698         4.f,  1.f,
8699
8700         // Batch 0, Height 1, Width (2) x Channel (2)
8701         4.f,  4.f,
8702         2.f,  1.f,
8703
8704         // Batch 0, Height 2, Width (2) x Channel (2)
8705         1.f, -2.f,
8706         6.f,  4.f
8707     };
8708     std::vector<float> expectedOutputValues
8709     {
8710         // Batch 0, Height 0, Width (2) x Channel (2)
8711         1.f, 3.f,
8712         4.f, 3.f,
8713
8714         // Batch 0, Height 1, Width (2) x Channel (2)
8715         4.f, 4.f,
8716         2.f, 3.f,
8717
8718         // Batch 0, Height 2, Width (2) x Channel (2)
8719         1.f, 2.f,
8720         6.f, 4.f
8721     };
8722
8723     return BatchNormTestImpl<armnn::DataType::Float32>(
8724         workloadFactory, memoryManager,
8725         inputOutputShape, inputValues, expectedOutputValues,
8726         0.f, 0, armnn::DataLayout::NHWC);
8727 }
8728
8729 LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8730     armnn::IWorkloadFactory& workloadFactory,
8731     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8732 {
8733     // BatchSize: 1
8734     // Channels: 2
8735     // Height: 3
8736     // Width: 2
8737
8738     const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8739     std::vector<float> inputValues
8740     {
8741         // Batch 0, Channel 0, Height (3) x Width (2)
8742          1.f, 4.f,
8743          4.f, 2.f,
8744          1.f, 6.f,
8745
8746         // Batch 0, Channel 1, Height (3) x Width (2)
8747          1.f, 1.f,
8748          4.f, 1.f,
8749         -2.f, 4.f
8750     };
8751     std::vector<float> expectedOutputValues
8752     {
8753         // Batch 0, Channel 0, Height (3) x Width (2)
8754         1.f, 4.f,
8755         4.f, 2.f,
8756         1.f, 6.f,
8757
8758         // Batch 0, Channel 1, Height (3) x Width (2)
8759         3.f, 3.f,
8760         4.f, 3.f,
8761         2.f, 4.f
8762     };
8763
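    // The float values above are quantized on the way in and out as
    // q = x / scale + offset; with scale = 1/20 and offset = 50, for example,
    // 4.f is stored as 130 and -2.f as 10, keeping the data well inside the uint8 range.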
8764     return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8765         workloadFactory, memoryManager,
8766         inputOutputShape, inputValues, expectedOutputValues,
8767         1.f/20.f, 50, armnn::DataLayout::NCHW);
8768 }
8769
8770 LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8771     armnn::IWorkloadFactory& workloadFactory,
8772     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8773 {
8774     // BatchSize: 1
8775     // Height: 3
8776     // Width: 2
8777     // Channels: 2
8778
8779     const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8780     std::vector<float> inputValues
8781     {
8782         // Batch 0, Height 0, Width (2) x Channel (2)
8783         1.f,  1.f,
8784         4.f,  1.f,
8785
8786         // Batch 0, Height 1, Width (2) x Channel (2)
8787         4.f,  4.f,
8788         2.f,  1.f,
8789
8790         // Batch 0, Height 2, Width (2) x Channel (2)
8791         1.f, -2.f,
8792         6.f,  4.f
8793     };
8794     std::vector<float> expectedOutputValues
8795     {
8796         // Batch 0, Height 0, Width (2) x Channel (2)
8797         1.f, 3.f,
8798         4.f, 3.f,
8799
8800         // Batch 0, Height 1, Width (2) x Channel (2)
8801         4.f, 4.f,
8802         2.f, 3.f,
8803
8804         // Batch 0, Height 2, Width (2) x Channel (2)
8805         1.f, 2.f,
8806         6.f, 4.f
8807     };
8808
8809     return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8810         workloadFactory, memoryManager,
8811         inputOutputShape, inputValues, expectedOutputValues,
8812         1.f/20.f, 50, armnn::DataLayout::NHWC);
8813 }
8814
8815 LayerTestResult<int16_t, 4> BatchNormInt16Test(
8816     armnn::IWorkloadFactory& workloadFactory,
8817     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8818 {
8819     // BatchSize: 1
8820     // Channels: 2
8821     // Height: 3
8822     // Width: 2
8823
8824     const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8825     std::vector<float> inputValues
8826     {
8827         // Batch 0, Channel 0, Height (3) x Width (2)
8828          1.f, 4.f,
8829          4.f, 2.f,
8830          1.f, 6.f,
8831
8832         // Batch 0, Channel 1, Height (3) x Width (2)
8833          1.f, 1.f,
8834          4.f, 1.f,
8835         -2.f, 4.f
8836     };
8837     std::vector<float> expectedOutputValues
8838     {
8839         // Batch 0, Channel 0, Height (3) x Width (2)
8840         1.f, 4.f,
8841         4.f, 2.f,
8842         1.f, 6.f,
8843
8844         // Batch 0, Channel 1, Height (3) x Width (2)
8845         3.f, 3.f,
8846         4.f, 3.f,
8847         2.f, 4.f
8848     };
8849
8850     return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8851         workloadFactory, memoryManager,
8852         inputOutputShape, inputValues, expectedOutputValues,
8853         1.f/20.f, 50, armnn::DataLayout::NCHW);
8854 }
8855
8856 LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
8857     armnn::IWorkloadFactory& workloadFactory,
8858     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8859 {
8860     // BatchSize: 1
8861     // Height: 3
8862     // Width: 2
8863     // Channels: 2
8864
8865     const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8866     std::vector<float> inputValues
8867     {
8868         // Batch 0, Height 0, Width (2) x Channel (2)
8869         1.f,  1.f,
8870         4.f,  1.f,
8871
8872         // Batch 0, Height 1, Width (2) x Channel (2)
8873         4.f,  4.f,
8874         2.f,  1.f,
8875
8876         // Batch 0, Height 2, Width (2) x Channel (2)
8877         1.f, -2.f,
8878         6.f,  4.f
8879     };
8880     std::vector<float> expectedOutputValues
8881     {
8882         // Batch 0, Height 0, Width (2) x Channel (2)
8883         1.f, 3.f,
8884         4.f, 3.f,
8885
8886         // Batch 0, Height 1, Width (2) x Channel (2)
8887         4.f, 4.f,
8888         2.f, 3.f,
8889
8890         // Batch 0, Height 2, Width (2) x Channel (2)
8891         1.f, 2.f,
8892         6.f, 4.f
8893     };
8894
8895     return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8896         workloadFactory, memoryManager,
8897         inputOutputShape, inputValues, expectedOutputValues,
8898         1.f/20.f, 50, armnn::DataLayout::NHWC);
8899 }
8900
8901 LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
8902     armnn::IWorkloadFactory& workloadFactory,
8903     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8904 {
8905     return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
8906 }
8907
8908 LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8909     armnn::IWorkloadFactory& workloadFactory,
8910     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8911 {
8912     return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8913 }
8914
8915 LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8916     armnn::IWorkloadFactory& workloadFactory,
8917     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8918 {
8919     return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
8920 }
8921
8922 LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8923     armnn::IWorkloadFactory& workloadFactory,
8924     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8925 {
8926     return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
8927 }
8928
8929 LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8930     armnn::IWorkloadFactory& workloadFactory,
8931     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8932 {
8933     return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
8934 }
8935
8936 LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8937     armnn::IWorkloadFactory& workloadFactory,
8938     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8939 {
8940     return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8941         workloadFactory, memoryManager, 0.5f, -1);
8942 }
8943
8944 LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8945     armnn::IWorkloadFactory& workloadFactory,
8946     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8947 {
8948     return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8949         workloadFactory, memoryManager, 0.5f, -1);
8950 }
8951
8952 LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8953     armnn::IWorkloadFactory& workloadFactory,
8954     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8955 {
8956     return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
8957 }
8958
8959 LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8960     armnn::IWorkloadFactory& workloadFactory,
8961     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8962 {
8963     return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
8964 }
8965
8966 LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8967     armnn::IWorkloadFactory& workloadFactory,
8968     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8969     bool useSubtensor)
8970 {
8971     return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8972         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
8973 }
8974
8975 LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8976     armnn::IWorkloadFactory& workloadFactory,
8977     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8978 {
8979     return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
8980 }
8981
8982 LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8983     armnn::IWorkloadFactory& workloadFactory,
8984     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8985 {
8986     return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8987         workloadFactory, memoryManager, 0.5f, -1);
8988 }
8989
8990 LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8991     armnn::IWorkloadFactory& workloadFactory,
8992     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8993     bool useSubtensor)
8994 {
8995     return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8996         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
8997 }
8998
8999 LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
9000     armnn::IWorkloadFactory& workloadFactory,
9001     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9002 {
9003     return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9004 }
9005
9006 LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
9007     armnn::IWorkloadFactory& workloadFactory,
9008     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9009 {
9010     return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9011 }
9012
9013 LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
9014     armnn::IWorkloadFactory& workloadFactory,
9015     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9016 {
9017     return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9018 }
9019
9020 LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
9021     armnn::IWorkloadFactory& workloadFactory,
9022     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         bool useSubtensor)
9023 {
9024     return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9025         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
9026 }
9027
9028 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
9029     armnn::IWorkloadFactory& workloadFactory,
9030     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9031 {
9032     return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
9033         workloadFactory, memoryManager, 0.5f, -1);
9034 }
9035
9036 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
9037     armnn::IWorkloadFactory& workloadFactory,
9038     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9039 {
9040     return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
9041         workloadFactory, memoryManager, 0.5f, -1);
9042 }
9043
9044 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
9045     armnn::IWorkloadFactory& workloadFactory,
9046     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9047 {
9048     return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9049         workloadFactory, memoryManager, 0.5f, -1);
9050 }
9051
9052 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
9053     armnn::IWorkloadFactory& workloadFactory,
9054     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9055     bool useSubtensor)
9056 {
9057     return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9058         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
9059 }
9060
9061 LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
9062     armnn::IWorkloadFactory& workloadFactory,
9063     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9064     bool forceNoPadding)
9065 {
9066     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
9067         workloadFactory, memoryManager, forceNoPadding);
9068 }
9069
9070 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
9071     armnn::IWorkloadFactory& workloadFactory,
9072     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9073     bool forceNoPadding)
9074 {
9075     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
9076         workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
9077 }
9078
9079 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
9080     armnn::IWorkloadFactory& workloadFactory,
9081     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9082     bool forceNoPadding)
9083 {
9084     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
9085             workloadFactory, memoryManager, forceNoPadding);
9086 }
9087
9088 LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
9089     armnn::IWorkloadFactory& workloadFactory,
9090     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9091     bool forceNoPadding)
9092 {
9093     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
9094         workloadFactory, memoryManager, forceNoPadding);
9095 }
9096
9097 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
9098     armnn::IWorkloadFactory& workloadFactory,
9099     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9100     bool forceNoPadding)
9101 {
9102     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
9103         workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
9104 }
9105
9106 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
9107     armnn::IWorkloadFactory& workloadFactory,
9108     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9109     bool forceNoPadding)
9110 {
9111     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
9112             workloadFactory, memoryManager, forceNoPadding);
9113 }
9114
9115 LayerTestResult<float, 4> SimpleMaxPooling2dTest(
9116     armnn::IWorkloadFactory& workloadFactory,
9117     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9118     const armnn::DataLayout dataLayout)
9119 {
9120     return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
9121 }
9122
9123 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
9124     armnn::IWorkloadFactory& workloadFactory,
9125     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9126     const armnn::DataLayout dataLayout)
9127 {
9128     return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
9129 }
9130
9131 LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
9132     armnn::IWorkloadFactory& workloadFactory,
9133     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9134     const armnn::DataLayout dataLayout)
9135 {
9136     return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9137 }

9138 LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
9139     armnn::IWorkloadFactory& workloadFactory,
9140     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9141 {
9142     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9143 }
9144
9145 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
9146     armnn::IWorkloadFactory& workloadFactory,
9147     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9148 {
9149     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9150             workloadFactory, memoryManager, 1.0f, -5);
9151 }
9152
9153 LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
9154     armnn::IWorkloadFactory& workloadFactory,
9155     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9156 {
9157     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9158             workloadFactory, memoryManager);
9159 }
9160
9161 LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
9162     armnn::IWorkloadFactory& workloadFactory,
9163     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9164 {
9165     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9166 }
9167
9168 LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
9169     armnn::IWorkloadFactory& workloadFactory,
9170     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9171 {
9172     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9173             workloadFactory, memoryManager, 1.0f, -5);
9174 }
9175
9176 LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
9177     armnn::IWorkloadFactory& workloadFactory,
9178     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9179 {
9180     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9181             workloadFactory, memoryManager);
9182 }
9183
9184 LayerTestResult<float, 4> SimpleAveragePooling2dTest(
9185     armnn::IWorkloadFactory& workloadFactory,
9186     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9187     const armnn::DataLayout dataLayout)
9188 {
9189     return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
9190 }
9191
9192 LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
9193     armnn::IWorkloadFactory& workloadFactory,
9194     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9195     const armnn::DataLayout dataLayout)
9196 {
9197     return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9198         workloadFactory, memoryManager, dataLayout, 0.5, -1);
9199 }
9200
9201 LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
9202     armnn::IWorkloadFactory& workloadFactory,
9203     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9204     const armnn::DataLayout dataLayout)
9205 {
9206     return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9207             workloadFactory, memoryManager, dataLayout);
9208 }
9209
9210 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
9211     armnn::IWorkloadFactory& workloadFactory,
9212     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9213     bool forceNoPadding)
9214 {
9215     return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
9216         workloadFactory, memoryManager, forceNoPadding);
9217 }
9218
9219 LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
9220     armnn::IWorkloadFactory& workloadFactory,
9221     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9222 {
9223     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9224 }
9225
9226 LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
9227     armnn::IWorkloadFactory& workloadFactory,
9228     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9229 {
9230     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9231         workloadFactory, memoryManager, 0.5, -1);
9232 }
9233
9234 LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
9235     armnn::IWorkloadFactory& workloadFactory,
9236     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9237 {
9238     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9239             workloadFactory, memoryManager);
9240 }

9241 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
9242     armnn::IWorkloadFactory& workloadFactory,
9243     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9244 {
9245     return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9246 }
9247
9248 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
9249     armnn::IWorkloadFactory& workloadFactory,
9250     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9251 {
9252     return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9253             workloadFactory, memoryManager);
9254 }
9255
9256 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
9257     armnn::IWorkloadFactory& workloadFactory,
9258     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9259 {
9260     return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9261             workloadFactory, memoryManager);
9262 }
9263
9264 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
9265     armnn::IWorkloadFactory& workloadFactory,
9266     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9267 {
9268     return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
9269             workloadFactory, memoryManager);
9270 }
9271
9272 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
9273     armnn::IWorkloadFactory& workloadFactory,
9274     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9275 {
9276     return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
9277             workloadFactory, memoryManager);
9278 }
9279
9280 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
9281     armnn::IWorkloadFactory& workloadFactory,
9282     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9283 {
9284     return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
9285             workloadFactory, memoryManager);
9286 }
9287
9288 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
9289     armnn::IWorkloadFactory& workloadFactory,
9290     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9291 {
9292     return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9293 }
9294
9295 LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
9296     armnn::IWorkloadFactory& workloadFactory,
9297     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9298 {
9299     return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9300             workloadFactory, memoryManager);
9301 }
9302
9303 LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
9304     armnn::IWorkloadFactory& workloadFactory,
9305     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9306 {
9307     return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9308             workloadFactory, memoryManager);
9309 }
9310
9311 LayerTestResult<float, 4> SimpleL2Pooling2dTest(
9312     armnn::IWorkloadFactory& workloadFactory,
9313     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9314     const armnn::DataLayout dataLayout)
9315 {
9316     return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
9317 }
9318
9319 LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
9320     armnn::IWorkloadFactory& workloadFactory,
9321     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9322     const armnn::DataLayout dataLayout)
9323 {
9324     return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
9325 }
9326
9327 LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
9328     armnn::IWorkloadFactory& workloadFactory,
9329     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9330     const armnn::DataLayout dataLayout)
9331 {
9332     return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9333 }
9334
9335 LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
9336     armnn::IWorkloadFactory& workloadFactory,
9337     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9338 {
9339     return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9340 }
9341
9342 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
9343     armnn::IWorkloadFactory& workloadFactory,
9344     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9345 {
9346     return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9347 }
9348
9349 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
9350     armnn::IWorkloadFactory& workloadFactory,
9351     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9352 {
9353     return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9354 }
9355
9356 LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
9357     armnn::IWorkloadFactory& workloadFactory,
9358     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9359 {
9360     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9361 }
9362
9363 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
9364     armnn::IWorkloadFactory& workloadFactory,
9365     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9366 {
9367     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9368 }
9369
9370 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
9371     armnn::IWorkloadFactory& workloadFactory,
9372     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9373 {
9374     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9375 }

9376 LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
9377     armnn::IWorkloadFactory& workloadFactory,
9378     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9379 {
9380     return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9381 }
9382
9383 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
9384     armnn::IWorkloadFactory& workloadFactory,
9385     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9386 {
9387     return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9388 }
9389
9390 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
9391     armnn::IWorkloadFactory& workloadFactory,
9392     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9393 {
9394     return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9395 }
9396
9397 LayerTestResult<float, 4> L2Pooling2dSize7Test(
9398     armnn::IWorkloadFactory& workloadFactory,
9399     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9400 {
9401     return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9402 }
9403
9404 LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
9405     armnn::IWorkloadFactory& workloadFactory,
9406     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9407 {
9408     return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9409 }
9410
9411 LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
9412     armnn::IWorkloadFactory& workloadFactory,
9413     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9414 {
9415     return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9416 }
9417
9418 LayerTestResult<float, 4> L2Pooling2dSize9Test(
9419     armnn::IWorkloadFactory& workloadFactory,
9420     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9421 {
9422     return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9423 }
9424
9425 LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
9426     armnn::IWorkloadFactory& workloadFactory,
9427     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9428 {
9429     return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9430 }
9431
9432 LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
9433     armnn::IWorkloadFactory& workloadFactory,
9434     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9435 {
9436     return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9437 }

9438 LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
9439     armnn::IWorkloadFactory& workloadFactory,
9440     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9441 {
9442     return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9443 }
9444
9445 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
9446     armnn::IWorkloadFactory& workloadFactory,
9447     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9448 {
9449     return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9450 }
9451
9452 LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
9453     armnn::IWorkloadFactory& workloadFactory,
9454     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9455 {
9456     return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9457 }
9458
9459 LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
9460     armnn::IWorkloadFactory& workloadFactory,
9461     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9462 {
9463     return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9464 }
9465
9466 LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
9467     armnn::IWorkloadFactory& workloadFactory,
9468     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9469 {
9470     return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9471 }
9472
9473 LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
9474     armnn::IWorkloadFactory& workloadFactory,
9475     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9476 {
9477     return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9478 }
9479
9480 LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
9481     armnn::IWorkloadFactory& workloadFactory,
9482     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9483 {
9484     return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9485 }
9486
9487 LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
9488     armnn::IWorkloadFactory& workloadFactory,
9489     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9490 {
9491     return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9492 }
9493
9494 LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
9495     armnn::IWorkloadFactory& workloadFactory,
9496     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9497 {
9498     return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9499 }
9500
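// Unlike the tests above, the Compare* tests below have no hard-coded expected output:
// they run the same pooling workload on workloadFactory and on refWorkloadFactory
// (typically the reference backend) and compare the two results against each other.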
9501 LayerTestResult<float, 4> ComparePooling2dTest(
9502     armnn::IWorkloadFactory& workloadFactory,
9503     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9504     armnn::IWorkloadFactory& refWorkloadFactory,
9505     armnn::PoolingAlgorithm  poolingType)
9506 {
9507     return ComparePooling2dTestCommon<armnn::DataType::Float32>(
9508         workloadFactory, memoryManager, refWorkloadFactory, poolingType);
9509 }
9510
9511 LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
9512     armnn::IWorkloadFactory& workloadFactory,
9513     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9514     armnn::IWorkloadFactory& refWorkloadFactory,
9515     armnn::PoolingAlgorithm  poolingType)
9516 {
9517     return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9518         workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
9519 }
9520
9521 LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
9522     armnn::IWorkloadFactory& workloadFactory,
9523     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9524     armnn::IWorkloadFactory& refWorkloadFactory,
9525     armnn::PoolingAlgorithm  poolingType)
9526 {
9527     return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9528             workloadFactory, memoryManager, refWorkloadFactory, poolingType);
9529 }
9530
9531 LayerTestResult<float, 2> FullyConnectedLargeTest(
9532     armnn::IWorkloadFactory& workloadFactory,
9533     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9534     bool transposeWeights)
9535 {
9536     return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
9537 }
9538
9539 LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
9540     armnn::IWorkloadFactory& workloadFactory,
9541     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9542 {
9543     // Create Initial Tensor
9544     // 1, 2, 3
9545     // 4, 5, 6
9546     // 7, 8, 9
9547
9548     armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
9549     armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
9550
9551     boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
9552                                                             {1, 2, 3,
9553                                                              4, 5, 6,
9554                                                              7, 8, 9
9555                                                             });
9556
9557     std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
9558             workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
9559     std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
9560             workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
9561
9562     // Apply MaxPool poolSize = 1x1, stride=2x2
9563     // Result =
9564     // 1, 3
9565     // 7, 9
9566     armnn::Pooling2dDescriptor descriptor;
9567     descriptor.m_PoolHeight = 1;
9568     descriptor.m_PoolWidth = 1;
9569     descriptor.m_StrideX = 2;
9570     descriptor.m_StrideY = 2;
9571     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
9572
9573     armnn::Pooling2dQueueDescriptor queueDescriptor;
9574     queueDescriptor.m_Parameters = descriptor;
9575     armnn::WorkloadInfo workloadInfo;
9576     AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
9577     AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
9578
9579     // Create the MaxPool
9580     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
9581
9583     auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
9584     boost::multi_array<float, 4> resultMaxPool;
9585     resultMaxPool.resize(shape);
9586
9588     // Create an addition with another tensor of the same size.
9589     // This would be the result of applying a Conv2d with a kernel of ones(2x2) and
9590     // stride 1x1 to the initial tensor.
9591     // 12, 16
9592     // 24, 28
9593
9594     armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
9595     armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
9596
9597     boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
9598                                                                     {12, 16,
9599                                                                      24, 28,
9600                                                                     });
9601
9602     // Expected output tensor after MaxPool and Addition.
9603     LayerTestResult<float,4> addRet(addOutputTensorInfo);
9604     addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
9605             {
9606                     13, 19,
9607                     31, 37
9608             }));
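    // Element-wise: 1 + 12 = 13, 3 + 16 = 19, 7 + 24 = 31, 9 + 28 = 37.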

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();
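
    // Prime the pooling input, then round-trip the (not yet computed) pooling
    // output through the staging buffer; the real values are only produced when
    // the workloads execute below.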
    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
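
//
// SpaceToBatchNd
//
// Thin wrappers that instantiate the templated SpaceToBatchNd test
// implementations for each supported data type and data layout.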

LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
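
// Note: despite the *Uint16 names, the following wrappers exercise
// QuantisedSymm16 (int16_t) data, as their LayerTestResult<int16_t, 4>
// return types indicate.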

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
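
//
// SpaceToDepth
//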

LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}
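
//
// StridedSlice
//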

LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
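
//
// Debug
//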

LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
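
//
// Gather
//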

LayerTestResult<float, 1> Gather1DParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
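
//
// Dequantize
//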

LayerTestResult<float, 4> DequantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> DequantizeOffsetUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> DequantizeSimpleInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
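
//
// Quantize
//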

LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

//
// TransposeConvolution2d
//
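
// Each variant below (simple, padded, strided) is exercised biased and
// unbiased, in both NCHW and NHWC, for Float32, QuantisedAsymm8 and
// QuantisedSymm16; the boolean argument to the test impl toggles the bias.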

// Simple biased
LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NHWC);
}

// Simple unbiased
LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NHWC);
}

// Padded biased
LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NHWC);
}

// Padded unbiased
LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NHWC);
}

// Strided biased
LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        true,
        armnn::DataLayout::NHWC);
}

// Strided unbiased
LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        false,
        armnn::DataLayout::NHWC);
}