//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
#include <ResolveType.hpp>

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <reference/workloads/RefWorkloads.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "GatherTestImpl.hpp"
#include "SpaceToBatchNdTestImpl.hpp"
#include "SpaceToDepthTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "StridedSliceTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"
#include "DebugTestImpl.hpp"
#include "DequantizeTestImpl.hpp"
#include "QuantizeTestImpl.hpp"
#include "TransposeConvolution2dTestImpl.hpp"

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

struct Simple3dSoftmaxOutputData
{
    const std::vector<float> outputData =
            {
                0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
                0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
            };

    const armnn::TensorShape inputShape{ 1, 8, 1 };

    const std::vector<float> inputData =
            {
                    0.f, 1.f, 0.f, 0.f,
                    .5f, 0.f, 0.f, 0.f,
            };
};

struct Simple4dSoftmaxData
{
    const armnn::TensorShape inputShape{ 1, 8, 1, 1 };

    const std::vector<float> outputData = { 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
                                            0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f };
    const std::vector<float> inputData =
            {
                    0.f, 1.f, 0.f, 0.f,
                    .5f, 0.f, 0.f, 0.f
            };
};
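
// The expected values in both softmax structs can be checked by hand: with
// inputs {0, 1, 0, 0, 0.5, 0, 0, 0} and beta = 1, the denominator is
// sum(exp(x)) = 6*exp(0) + exp(1) + exp(0.5) ~= 6 + 2.71828 + 1.64872 = 10.36700,
// giving exp(0)/10.36700 ~= 0.0964599, exp(1)/10.36700 ~= 0.26220518 and
// exp(0.5)/10.36700 ~= 0.15903549.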

// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
{
    if (biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}
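
// Note on qScale: the quantized Conv2d workloads expect the bias to be
// quantized with scale inputScale * weightScale, which is why the call sites
// below pass qScale * qScale (input and kernel share qScale in these tests).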

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
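    // With a 16x8 input, a 3x5 kernel, no padding and stride 1, each spatial
    // dimension is (I - K)/S + 1: width (16 - 3)/1 + 1 = 14, height (8 - 5)/1 + 1 = 4.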
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
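    // With a 16x8 input, a 3x3 kernel, no padding and stride 1:
    // width (16 - 3)/1 + 1 = 14, height (8 - 3)/1 + 1 = 6.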
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel 4x3 image (NHWC) as input.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                       1, 5, 2, 3,
                                                       8, 7, 3, 6,
                                                       3, 3, 9, 1
                                                       });

    // Use 1 batch of a 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                    4, 5, 6,
                                                                    0, 0, 0,
                                                                    3, 2, 1
                                                                    });

    // Expected output is 1 batch of a 1-channel 4x3 image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
            {
                    23, 41, 33, 21,
                    44, 65, 76, 52,
                    82, 85, 79, 42
            };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        float qScale,
        int32_t qOffset,
        bool biasEnabled,
        const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1-channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
            {
                1, 5, 2, 3, 5,
                8, 7, 3, 6, 3,
                3, 3, 9, 1, 9,
                4, 1, 8, 1, 3,
                6, 8, 1, 9, 2
            });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
            {
                4, 5, 6,
                0, 0, 0,
                3, 2, 1
            });

    // Expected output is a single-batch, 1-channel, 3x3 image.
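    // With padding 1 on each side and stride 2: (5 - 3 + 2)/2 + 1 = 3 in both dimensions.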
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
            {
                23, 33, 24,
                91, 99, 48,
                26, 50, 19
            };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        biasEnabled,
        armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        biasEnabled,
        layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[..... .....  ..... .....  ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
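    // Output size with asymmetric padding (left 1, top 2, right 3, bottom 4):
    // height (3 + 2 + 4 - 2)/1 + 1 = 8, width (3 + 1 + 3 - 2)/1 + 1 = 6.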
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,    0,      0,    0,    0,    0,
            -242,  -594,  -934, -372,    0,    0,
            -495, -1190, -1850, -725,    0,    0,
            -538, -1256, -1916, -748,    0,    0,
            -273, -626,  -946,  -363,    0,    0,
               0,    0,     0,     0,    0,    0,
               0,    0,     0,     0,    0,    0,
               0,    0,     0,     0,    0,    0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,41,51,
            12,22,32,42,52,
            13,23,33,43,53,
            14,24,34,44,54,
            15,25,35,45,55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,-31,-41,
            -12,-22,-32,-42,
            -13,-23,-33,-43,
            -14,-24,-34,-44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
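    // Output size with padding (left 1, top 1, right 2, bottom 2) and a 4x4 kernel:
    // (5 + 1 + 2 - 4)/1 + 1 = 5 in both dimensions.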
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940,  -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144,  -9318, -5152,
            -5032,  -7256,  -9376,  -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2); // Padding bottom.
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout layout)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout layout)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
            <armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
            workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
}

LayerTestResult<float, 4> CompareConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
            workloadFactory, memoryManager, refWorkloadFactory);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    bool biasEnabled = false)
{
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                    kernelTensorInfo.GetQuantizationOffset(),
                                                                    kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            input,
            kernel,
            GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
            expectedOutput,
            qScale,
            qOffset,
            layout,
            padLeft,
            padTop,
            padRight,
            padBottom,
            strideX,
            strideY,
            dilationX,
            dilationY);
}
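
// A small constexpr sketch (not part of the original test suite; the helper
// name is illustrative only) of the output-size arithmetic that the dilation
// comments below rely on: the effective kernel size is d*(K - 1) + 1, and the
// output size is floor((I + padFront + padBack - K') / S) + 1.
namespace
{
constexpr uint32_t DilatedConvOutputSize(uint32_t inputSize,
                                         uint32_t kernelSize,
                                         uint32_t dilation,
                                         uint32_t padFront,
                                         uint32_t padBack,
                                         uint32_t stride)
{
    // Truncating integer division implements the floor in the formula above.
    return (inputSize + padFront + padBack - (dilation * (kernelSize - 1) + 1)) / stride + 1;
}

// A dilation-3 3x3 kernel over a 10x10 input with no padding and stride 1
// yields the 4x4 outputs used by the dilation tests below.
static_assert(DilatedConvOutputSize(10, 3, 3, 0, 0, 1) == 4, "dilation arithmetic check");
} // anonymous namespace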

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // With dilation 3 the effective kernel size is d*(K - 1) + 1 = 3*(3 - 1) + 1 = 7,
    // so the output is 4x4: (I - K' + 2P)/S + 1 = (10 - 7 + 0)/1 + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,
            3,
            layout,
            0,  // padLeft
            0,  // padTop
            0,  // padRight
            0,  // padBottom
            1,  // strideX
            1,  // strideY
            biasEnabled);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // With dilation 3 the effective kernel size is d*(K - 1) + 1 = 3*(3 - 1) + 1 = 7,
    // so the output is 4x4: (I - K' + 2P)/S + 1 = (10 - 7 + 0)/1 + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
         6.,  4.,  4.,  4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,
            3,
            layout,
            0,  // padLeft
            0,  // padTop
            0,  // padRight
            0,  // padBottom
            1,  // strideX
            1,  // strideY
            biasEnabled);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
        armnn::IWorkloadFactory &workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2,
        3, 4
    };

    // With dilation 2 the effective kernel size is d*(K - 1) + 1 = 2*(2 - 1) + 1 = 3.
    // Padding is 1 on each side, so the total padding per dimension is 2, and the
    // output is 4x4: floor((I - K' + Ptotal)/S) + 1 = floor((10 - 3 + 2)/3) + 1 = 4,
    // where d = 2, K = 2, I = 10, Ptotal = 2 and S = 3.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        4,  7,  7, 3,
        6, 10, 10, 4,
        6, 10, 10, 4,
        2,  3,  3, 1
    };
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            2,
            2,
            layout,
            padLeft,
            padTop,
            padRight,
            padBottom,
            3,
            3,
            biasEnabled);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the Python TensorFlow library with strideX=1, strideY=1.
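    // The 5x5 spatial size follows from the 4x4 kernel with padding
    // (left 1, top 1, right 2, bottom 2): (5 + 1 + 2 - 4)/1 + 1 = 5.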
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
             32, 31, 30, 29,
             28, 27, 26, 25,
             24, 23, 22, 21,
             20, 19, 18, 17,

             16, 15, 14, 13,
             12, 11, 10,  9,
              8,  7,  6,  5,
              4,  3,  2,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
             1, 2, 3,
             4, 5, 6,
             7, 8, 9
        })));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX = 1;
    uint32_t strideY = 1;
    uint32_t dilationX = 3;
    uint32_t dilationY = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
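    // Effective kernel size: d*(K - 1) + 1 = 3*(3 - 1) + 1 = 7, so (9 - 7)/1 + 1 = 3.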
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
             5, 5, 5,
             5, 5, 5,
             5, 5, 5
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const std::vector<float>& inputNoQuantizedValues,
        armnn::TensorInfo& inputTensorInfo,
        const std::vector<float>& kernelNoQuantizedValues,
        armnn::TensorInfo& kernelTensorInfo,
        const std::vector<float>& outputExpectedNoQuantizedValues,
        armnn::TensorInfo& outputTensorInfo,
        uint32_t dilationX,
        uint32_t dilationY,
        armnn::DataLayout layout = armnn::DataLayout::NCHW,
        bool biasEnabled = false)
{
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX = 1;
    uint32_t strideY = 1;

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            input,
            kernel,
            GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
            expectedOutput,
            qScale,
            qOffset,
            layout,
            padLeft,
            padTop,
            padRight,
            padBottom,
            strideX,
            strideY,
            dilationX,
            dilationY);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
            {
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0
            };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
            {
                    1, 2, 3,
                    4, 5, 6,
                    7, 8, 9
            };

    // With dilation 3 the effective kernel size is d*(K - 1) + 1 = 3*(3 - 1) + 1 = 7,
    // so the output is 4x4: (I - K' + 2P)/S + 1 = (10 - 7 + 0)/1 + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
            {
                    6., 5., 5., 5.,
                    6., 5., 5., 5.,
                    6., 5., 5., 5.,
                    3., 2., 2., 2.
            };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,
            3,
            layout,
            biasEnabled);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
            {
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0
            };

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
            {
                    1, 2, 3,
                    4, 5, 6,
                    7, 8, 9,

                    1, 2, 3,
                    4, 5, 6,
                    7, 8, 9
            };

1428     // Since the dilation rate is 3, the effective size of each 3x3 kernel becomes 7x7,
1429     // therefore the output will be 2x4x4 (one 4x4 map per channel): (I-K+2P)/S + 1 => (10-7+0)/1 + 1 = 4
1430     armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
1431     std::vector<float> outputExpectedNoQuantizedValues =
1432             {
1433                     6., 5., 5., 5.,
1434                     6., 5., 5., 5.,
1435                     6., 5., 5., 5.,
1436                     3., 2., 2., 2.,
1437
1438                     6., 5., 5., 5.,
1439                     6., 5., 5., 5.,
1440                     6., 5., 5., 5.,
1441                     3., 2., 2., 2.
1442             };
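    // With a depth multiplier of 1, each input channel is convolved independently with its
    // own 3x3 slice of the kernel; both channels carry identical data here, so the two 4x4
    // output maps are identical as well.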
1443
1444     return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1445             workloadFactory,
1446             memoryManager,
1447             inputNoQuantizedValues,
1448             inputTensorInfo,
1449             kernelNoQuantizedValues,
1450             kernelTensorInfo,
1451             outputExpectedNoQuantizedValues,
1452             outputTensorInfo,
1453             3,
1454             3,
1455             layout,
1456             biasEnabled);
1457 }
1458
1459
1460 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
1461 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
1462         armnn::IWorkloadFactory&,
1463         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1464         bool,
1465         armnn::DataLayout);
1466
1467 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
1468 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1469         armnn::IWorkloadFactory&,
1470         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1471         bool,
1472         armnn::DataLayout);
1473
1474 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
1475 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1476         armnn::IWorkloadFactory&,
1477         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1478         bool,
1479         armnn::DataLayout);
1480
1481 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
1482 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
1483         armnn::IWorkloadFactory&,
1484         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1485         bool,
1486         armnn::DataLayout);
1487
1488 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
1489 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1490         armnn::IWorkloadFactory&,
1491         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1492         bool,
1493         armnn::DataLayout);
1494
1495 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
1496 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1497         armnn::IWorkloadFactory&,
1498         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1499         bool,
1500         armnn::DataLayout);
1501
1502 LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1503     armnn::IWorkloadFactory& workloadFactory,
1504     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1505     bool biasEnabled,
1506     const armnn::DataLayout layout)
1507 {
1508     return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1509         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
1510 }
1511
1512 LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1513     armnn::IWorkloadFactory& workloadFactory,
1514     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1515     bool biasEnabled)
1516 {
1517     return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1518         workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
1519 }
1520
1521 LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1522     armnn::IWorkloadFactory& workloadFactory,
1523     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1524     bool biasEnabled,
1525     const armnn::DataLayout layout)
1526 {
1527     return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1528         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
1529 }
1530
1531 LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
1532     armnn::IWorkloadFactory& workloadFactory,
1533     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1534 {
1535     armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
1536     auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
1537
1538     std::vector<float> kernelData;
1539     std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
1540     for (unsigned int i = 0; i < 64; ++i)
1541     {
1542         kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
1543     }
1544     armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
1545     auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
1546
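    // Each of the 64 output channels applies the same 2x2 kernel to the single input
    // channel: 1*1 + (-1)*2 + (-1)*3 + 1*4 = 0, hence the all-zero expected output.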
1547     std::vector<float> expectedOutputData(64, 0.f);
1548     armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
1549     auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
1550
1551     return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1552             workloadFactory,
1553             memoryManager,
1554             input,
1555             kernel,
1556             boost::multi_array<float, 1>(),
1557             expectedOutput,
1558             0.f,
1559             0,
1560             armnn::DataLayout::NCHW);
1561 }
1562
1563 LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1564     armnn::IWorkloadFactory& workloadFactory,
1565     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1566     bool biasEnabled,
1567     const armnn::DataLayout layout)
1568 {
1569     return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1570         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
1571 }
1572
1573 LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1574     armnn::IWorkloadFactory& workloadFactory,
1575     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1576     bool biasEnabled,
1577     const armnn::DataLayout layout)
1578 {
1579     return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1580         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1581 }
1582
1583 LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1584     armnn::IWorkloadFactory& workloadFactory,
1585     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1586     bool biasEnabled,
1587     const armnn::DataLayout layout)
1588 {
1589     return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1590         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1591 }
1592
1593 LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1594         armnn::IWorkloadFactory& workloadFactory,
1595         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1596 {
1597     return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1598             workloadFactory,
1599             memoryManager,
1600             0.f,
1601             0,
1602             false);
1603 }
1604
1605 LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1606         armnn::IWorkloadFactory& workloadFactory,
1607         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1608         bool biasEnabled,
1609         const armnn::DataLayout layout)
1610 {
1611     return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1612         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1613 }
1614
1615 LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1616                 armnn::IWorkloadFactory& workloadFactory,
1617                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1618                 bool biasEnabled,
1619                 const armnn::DataLayout layout)
1620 {
1621     return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1622         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1623 }
1624
1625 LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
1626     armnn::IWorkloadFactory& workloadFactory,
1627     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1628     armnn::IWorkloadFactory& refWorkloadFactory,
1629     const armnn::DataLayout layout)
1630 {
1631     return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1632         workloadFactory, memoryManager, refWorkloadFactory, layout);
1633 }
1634
1635 LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1636     armnn::IWorkloadFactory& workloadFactory,
1637     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1638     armnn::IWorkloadFactory& refWorkloadFactory,
1639     const armnn::DataLayout layout)
1640 {
1641     return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1642         workloadFactory, memoryManager, refWorkloadFactory, layout);
1643 }
1644
1645 LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1646     armnn::IWorkloadFactory& workloadFactory,
1647     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1648 {
1649     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1650     auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
1651     return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
1652 }
1653
1654 LayerTestResult<float,4> SimpleNormalizationWithinTest(
1655     armnn::IWorkloadFactory& workloadFactory,
1656     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1657 {
1658     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1659     auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
1660     return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
1661 }
1662
1663 LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1664     armnn::IWorkloadFactory& workloadFactory,
1665     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1666 {
1667     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1668     auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
1669     return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
1670 }
1671
1672 LayerTestResult<float,2> SimpleSoftmaxTest(
1673     armnn::IWorkloadFactory& workloadFactory,
1674     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1675     float beta)
1676 {
1677     return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1678 }
1679
1680 LayerTestResult<float,2> SimpleAxisSoftmaxTest(
1681         armnn::IWorkloadFactory& workloadFactory,
1682         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1683         float beta,
1684         int axis)
1685 {
1686     return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
1687 }
1688
1689 LayerTestResult<float,3> Simple3dSoftmaxTest(
1690         armnn::IWorkloadFactory& workloadFactory,
1691         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1692         float beta)
1693 {
1694     Simple3dSoftmaxOutputData data;
1695     return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1696                                                              data.inputShape, data.outputData, data.inputData);
1697 }
1698
1699 LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
1700         armnn::IWorkloadFactory& workloadFactory,
1701         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1702         float beta,
1703         int axis)
1704 {
1705     armnn::TensorShape inputShape;
1706     std::vector<float> inputData;
1707     std::vector<float> outputData;
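    // For every axis choice the slices reduced by the softmax are {17, 16, 15, 14, 1} or
    // {-1, -2, -3, -4, -17}; softmax is shift-invariant and the latter is the former minus 18,
    // so both produce the same five values, e.g.
    //   exp(17) / (exp(17) + exp(16) + exp(15) + exp(14) + exp(1)) = 0.643914...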
1708     switch (axis)
1709     {
1710     case -3:
1711     case 0:
1712         {
1713             inputShape = {5, 2, 2};
1714
1715             inputData =
1716                     {
1717                             17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1718
1719                             15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1720                     };
1721
1722             outputData =
1723                     {
1724                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1725                             0.236882800924671f,
1726                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1727                             0.087144312427294f,
1728
1729                             0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1730                             0.032058600957022f,
1731                             0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1732                             7.246299848982885e-08f
1733                     };
1734             break;
1735         }
1736     case -2:
1737     case 1:
1738         {
1739             inputShape = {2, 5, 2};
1740
1741             inputData =
1742                     {
1743                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1744
1745                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1746                     };
1747
1748             outputData =
1749                     {
1750                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1751                             0.087144312427294f,
1752                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1753                             7.246299848982885e-08f,
1754
1755                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1756                             0.087144312427294f,
1757                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1758                             7.246299848982885e-08f
1759                     };
1760         break;
1761         }
1762     case -1:
1763     case 2:
1764         {
1765             inputShape = {2, 2, 5};
1766
1767             inputData =
1768                     {
1769                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1770                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1771                     };
1772
1773             outputData =
1774                     {
1775                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1776                             7.246299848982885e-08f,
1777                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1778                             7.246299848982885e-08f,
1779
1780                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1781                             7.246299848982885e-08f,
1782                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1783                             7.246299848982885e-08f
1784                     };
1785             break;
1786         }
1787     }
1788
1789     return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1790                                                              inputShape, outputData, inputData, axis);
1791 }
1792
1793 LayerTestResult<float,4> Simple4dSoftmaxTest(
1794         armnn::IWorkloadFactory& workloadFactory,
1795         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1796         float beta)
1797 {
1798     Simple4dSoftmaxData data;
1799     return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
1800                                                              data.outputData, data.inputData);
1801 }
1802
1803 LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
1804         armnn::IWorkloadFactory& workloadFactory,
1805         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1806         float beta,
1807         int axis)
1808 {
1809     armnn::TensorShape inputShape;
1810     std::vector<float> inputData;
1811     std::vector<float> outputData;
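    // As in the 3D variant above, every slice along the chosen axis is {17, 16, 15, 14, 1}
    // or that sequence shifted by -18, so the expected data repeats the same five softmax values.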
1812     switch (axis)
1813     {
1814     case -4:
1815     case 0:
1816         {
1817             inputShape = {5, 2, 2, 2};
1818
1819             inputData =
1820                     {
1821                             17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
1822                             16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
1823                             15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
1824                             14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
1825                     };
1826
1827             outputData =
1828                     {
1829                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1830                             0.643914213228014f,
1831                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
1832                             0.236882800924671f,
1833                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
1834                             0.236882800924671f,
1835                             0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1836                             0.087144312427294f,
1837
1838                             0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1839                             0.032058600957022f,
1840                             0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
1841                             0.032058600957022f,
1842                             0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1843                             7.246299848982885e-08f,
1844                             7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1845                             7.246299848982885e-08f, 7.246299848982885e-08f
1846                     };
1847             break;
1848         }
1849     case -3:
1850     case 1:
1851         {
1852             inputShape = {2, 5, 2, 2};
1853
1854             inputData =
1855                     {
1856                             17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1857                             15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
1858                             17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1859                             15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1860                     };
1861
1862             outputData =
1863                     {
1864                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1865                             0.236882800924671f,
1866                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1867                             0.087144312427294f,
1868                             0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1869                             0.032058600957022f,
1870                             0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1871                             7.246299848982885e-08f,
1872
1873
1874                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1875                             0.236882800924671f,
1876                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1877                             0.087144312427294f,
1878                             0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1879                             0.032058600957022f,
1880                             0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1881                             7.246299848982885e-08f
1882                     };
1883             break;
1884         }
1885     case -2:
1886     case 2:
1887         {
1888         inputShape = {2, 2, 5, 2};
1889
1890         inputData =
1891                 {
1892                         17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1893                         17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1894                         17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1895                         17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1896                 };
1897
1898         outputData =
1899                 {
1900                         0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1901                         0.087144312427294f,
1902                         0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1903                         7.246299848982885e-08f,
1904                         0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1905                         0.087144312427294f,
1906                         0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1907                         7.246299848982885e-08f,
1908
1909                         0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1910                         0.087144312427294f,
1911                         0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1912                         7.246299848982885e-08f,
1913                         0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1914                         0.087144312427294f,
1915                         0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1916                         7.246299848982885e-08f
1917                 };
1918         break;
1919         }
1920     case -1:
1921     case 3:
1922         {
1923             inputShape = {2, 2, 2, 5};
1924
1925             inputData =
1926                     {
1927                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1928                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1929                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1930                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1931                     };
1932
1933             outputData =
1934                     {
1935                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1936                             7.246299848982885e-08f,
1937                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1938                             7.246299848982885e-08f,
1939                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1940                             7.246299848982885e-08f,
1941                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1942                             7.246299848982885e-08f,
1943
1944                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1945                             7.246299848982885e-08f,
1946                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1947                             7.246299848982885e-08f,
1948                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1949                             7.246299848982885e-08f,
1950                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1951                             7.246299848982885e-08f
1952                     };
1953             break;
1954         }
1955     }
1956
1957     return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, inputShape,
1958                                                              outputData, inputData, axis);
1959 }
1960
1961 LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1962     armnn::IWorkloadFactory& workloadFactory,
1963     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1964     float beta)
1965 {
1966     return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1967 }
1968
1969 LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1970         armnn::IWorkloadFactory& workloadFactory,
1971         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1972         float beta)
1973 {
1974     Simple3dSoftmaxOutputData data;
1975     return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1976                                                                      data.inputShape, data.outputData, data.inputData);
1977 }
1978
1979 LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1980         armnn::IWorkloadFactory& workloadFactory,
1981         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1982         float beta)
1983 {
1984     Simple4dSoftmaxData data;
1985
1986     return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1987                                                                      data.inputShape, data.outputData, data.inputData);
1988 }
1989
1990 LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1991         armnn::IWorkloadFactory& workloadFactory,
1992         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1993         float beta)
1994 {
1995     return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1996 }
1997
1998 LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1999         armnn::IWorkloadFactory& workloadFactory,
2000         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2001         float beta)
2002 {
2003     Simple3dSoftmaxOutputData data;
2004     return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2005                                                                      data.inputShape, data.outputData, data.inputData);
2006 }
2007
2008 LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
2009         armnn::IWorkloadFactory& workloadFactory,
2010         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2011         float beta)
2012 {
2013     Simple4dSoftmaxData data;
2014
2015     return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2016                                                                      data.inputShape, data.outputData, data.inputData);
2017 }
2018
2019 LayerTestResult<float,4> CompareNormalizationTest(
2020     armnn::IWorkloadFactory& workloadFactory,
2021     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2022     armnn::IWorkloadFactory& refWorkloadFactory,
2023     armnn::NormalizationAlgorithmChannel normChannel,
2024     armnn::NormalizationAlgorithmMethod normMethod)
2025 {
2026     return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
2027 }
2028
2029 LayerTestResult<float,2> CompareSoftmaxTest(
2030     armnn::IWorkloadFactory& workloadFactory,
2031     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2032     armnn::IWorkloadFactory& refWorkloadFactory,
2033     float beta)
2034 {
2035     return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
2036         workloadFactory, memoryManager, refWorkloadFactory, beta);
2037 }
2038
2039 LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
2040     armnn::IWorkloadFactory& workloadFactory,
2041     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2042     armnn::IWorkloadFactory& refWorkloadFactory,
2043     float beta)
2044 {
2045     return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
2046         workloadFactory, memoryManager, refWorkloadFactory, beta);
2047 }
2048
2049 std::vector<LayerTestResult<float,3>> SplitterTest(
2050     armnn::IWorkloadFactory& workloadFactory,
2051     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2052 {
2053     return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
2054 }
2055
2056 std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
2057     armnn::IWorkloadFactory& workloadFactory,
2058     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2059 {
2060     return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
2061 }
2062
2063 std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
2064     armnn::IWorkloadFactory& workloadFactory,
2065     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2066 {
2067     return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2068 }
2069
2070 LayerTestResult<float, 3> CopyViaSplitterTest(
2071     armnn::IWorkloadFactory& workloadFactory,
2072     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2073 {
2074     return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2075 }
2076
2077 LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
2078     armnn::IWorkloadFactory& workloadFactory,
2079     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2080 {
2081     return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
2082 }
2083
2084 LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
2085         armnn::IWorkloadFactory& workloadFactory,
2086         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2087 {
2088     return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2089 }
2090
2091 void LstmUtilsZeroVectorTest()
2092 {
2093     armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
2094     boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2095             {2., 3., 3., 4.}));
2096
2097     boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2098             {0., 0., 0., 0.}));
2099
2100     return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
2101 }
2102
2103 void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
2104 {
2105     uint32_t batchSize = 2;
2106     uint32_t vecSize = 4;
2107     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2108     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2109             { 0.1f, 0.2f, 0.3f, 0.4f,      //batch 0
2110               0.9f, 1.0f, 1.1f, 1.2f }));  //batch 1
2111
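    // Each batch row is normalised to zero mean and unit variance; for batch 0 the mean is
    // 0.25 and the standard deviation is sqrt(0.0125) ~= 0.1118, so (0.1 - 0.25) / 0.1118 ~= -1.3416.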
2112     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2113             { -1.34164071f, -0.447213531f, 0.44721365f,  1.34164071f,      //batch 0
2114               -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f  }));  //batch 1
2115
2116     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2117             vecSize, batchSize, expectedOutput);
2118 }
2119
2120 void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
2121 {
2122     uint32_t batchSize = 2;
2123     uint32_t vecSize = 4;
2124     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2125     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2126             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
2127               0.0f, 0.0f, 0.0f, 0.0f }));  //batch 1
2128
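    // An all-zero row has zero variance; expecting an all-zero output checks that the
    // normalisation does not divide by zero.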
2129     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2130             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
2131               0.0f, 0.0f, 0.0f, 0.0f }));  //batch 1
2132
2133     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2134             vecSize, batchSize, expectedOutput);
2135 }
2136
2137 void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
2138 {
2139     uint32_t batchSize = 2;
2140     uint32_t vecSize = 4;
2141     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2142     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2143             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
2144               0.1f, 0.2f, 0.3f, 0.4f }));  //batch 1
2145
2146     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2147             {         0.0f,          0.0f,        0.0f,        0.0f,      //batch 0
2148               -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f }));  //batch 1
2149
2150     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2151             vecSize, batchSize, expectedOutput);
2152 }
2153
2154
2155 void LstmUtilsVectorBatchVectorCwiseProductTest()
2156 {
2157     uint32_t batchSize = 4;
2158     uint32_t vecSize = 29;
2159     armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2160     boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2161             {   1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f, 10.1f,
2162               11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
2163               21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,     0.0f}));
2164
2165     armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2166     boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2167             { /* batch 0 */
2168                 1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f,  10.1f,
2169               11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f,  20.2f,
2170               21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,   0.0f,
2171               /* batch 1 */
2172                 -1.1f,   -2.2f,   -3.3f,   -4.4f,   -5.5f,   -6.6f,   -7.7f,   -8.8f,   -9.9f, -10.1f,
2173               -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
2174               -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f,    0.0f,
2175               /* batch 2 */
2176                 1.1f,   -2.2f,   3.3f,   -4.4f,   5.5f,   -6.6f,   7.7f,   -8.8f,   9.9f, -10.1f,
2177               11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
2178               21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f,   0.0f,
2179               /* batch 3 */
2180                 -1.1f,   2.2f,   -3.3f,   4.4f,   -5.5f,   6.6f,   -7.7f,   8.8f,   -9.9f, 10.1f,
2181               -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
2182               -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f,    0.0f}));
2183
2184     // Expected output is the element-wise product vector[i] * batchVector[b][i] (e.g. 1.1f * 1.1f = 1.21f).
2185     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2186             { /* batch 0 */
2187                  1.210000f,    4.840000f,   10.889999f,   19.360001f,   30.250000f,   43.559998f,
2188                 59.289997f,   77.440002f,   98.009995f,  102.010010f,  123.432091f,  146.894394f,
2189                172.396896f,  199.939606f,  229.522491f,  261.145599f,  294.808899f,  330.512421f,
2190                368.256134f,  408.040039f,  449.864075f,  493.728363f,  539.632874f,  587.577576f,
2191                637.562500f,  689.587585f,  743.652954f,  799.758423f,    0.000000f,
2192               /* batch 1 */
2193                 -1.210000f,   -4.840000f,  -10.889999f,  -19.360001f,  -30.250000f,  -43.559998f,
2194                -59.289997f,  -77.440002f,  -98.009995f, -102.010010f, -123.432091f, -146.894394f,
2195               -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
2196               -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
2197               -637.562500f, -689.587585f, -743.652954f, -799.758423f,    0.000000f,
2198               /* batch 2 */
2199                  1.210000f,   -4.840000f,  10.889999f,   -19.360001f,   30.250000f,  -43.559998f,
2200                 59.289997f,  -77.440002f,  98.009995f,  -102.010010f,  123.432091f, -146.894394f,
2201                172.396896f, -199.939606f, 229.522491f,  -261.145599f,  294.808899f, -330.512421f,
2202                368.256134f, -408.040039f, 449.864075f,  -493.728363f,  539.632874f, -587.577576f,
2203                637.562500f, -689.587585f, 743.652954f,  -799.758423f,    0.000000f,
2204               /* batch 3 */
2205                 -1.210000f,    4.840000f,  -10.889999f,   19.360001f,  -30.250000f,   43.559998f,
2206                -59.289997f,   77.440002f,  -98.009995f,  102.010010f, -123.432091f,  146.894394f,
2207               -172.396896f,  199.939606f, -229.522491f,  261.145599f, -294.808899f,  330.512421f,
2208               -368.256134f,  408.040039f, -449.864075f,  493.728363f, -539.632874f,  587.577576f,
2209               -637.562500f,  689.587585f, -743.652954f,  799.758423f,    0.000000f}));
2210
2211     return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
2212             vecSize, batchSize, expectedOutput);
2213 }
2214
2215
2216 void LstmUtilsVectorBatchVectorAddTest()
2217 {
2218     uint32_t batchSize = 2;
2219     uint32_t vecSize = 3;
2220     armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2221     boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2222             { 0.0f, -0.5f, 1.0f}));
2223
2224     armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2225     boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2226             { 1.0f, 2.0f, 3.0f,    //batch 0
2227               4.0f, 5.0f, 6.0f})); //batch 1
2228
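    // The expected output adds the vector to every batch row:
    // {1, 2, 3} + {0, -0.5, 1} = {1, 1.5, 4} and {4, 5, 6} + {0, -0.5, 1} = {4, 4.5, 7}.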
2229     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2230             { 1.0f, 1.5f, 4.0f,
2231               4.0f, 4.5f, 7.0f}));
2232
2233     return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
2234             vecSize, batchSize, expectedOutput);
2235 }
2236
2237
2238 LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
2239     armnn::IWorkloadFactory& workloadFactory,
2240     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2241 {
2242     armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
2243     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2244             { 2., 3., 3., 4. }));
2245
2246     armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
2247     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2248             {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2249              -0.42734814f, -0.00478661f,  0.13455015f, -0.03560682f}));
2250     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
2251         workloadFactory, memoryManager, input, expectedOutput);
2252 }
2253
2254 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
2255     armnn::IWorkloadFactory& workloadFactory,
2256     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2257 {
2258     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2259     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2260             {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2261              0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
2262
2263     armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
2264     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2265             {-0.00396806f, 0.029352f,     -0.00279226f, 0.0159977f,   -0.00835576f,
2266              -0.0211779f,  0.0283512f,    -0.0114597f,  0.00907307f,  -0.0244004f,
2267              -0.0152191f,  -0.0259063f,   0.00914318f,  0.00415118f,  0.017147f,
2268              0.0134203f, -0.013869f,    0.0287268f,   -0.00334693f, 0.00733398f,  -0.0287926f,
2269              -0.0186926f,   0.0193662f,   -0.0115437f,  0.00422612f,  -0.0345232f,
2270              0.00223253f,   -0.00957321f, 0.0210624f,   0.013331f,    0.0150954f,
2271              0.02168f}));
2272     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
2273         workloadFactory, memoryManager, input, expectedOutput);
2274 }
2275
2276 LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
2277     armnn::IWorkloadFactory& workloadFactory,
2278     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2279 {
2280     armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
2281     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2282             {2., 3., 3., 4.}));
2283
2284
2285     armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
2286     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2287             {{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
2288               -0.0185422f,   0.11281417f,  0.24466537f, -0.1826292f}}));
2289
2290     return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
2291         workloadFactory, memoryManager, input, expectedOutput);
2292 }
2293
2294
2295 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
2296         armnn::IWorkloadFactory& workloadFactory,
2297         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2298 {
2299     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2300     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2301             {0.7f, 0.8f, 0.1f, 0.2f, 0.3f,     //batch 0
2302              0.3f, 0.2f, 0.9f, 0.8f, 0.1f}));  //batch 1
2303
2304     armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
2305     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2306             {  0.0244077f,  0.128027f, -0.00170918f,    //batch 0
2307              -0.00692428f, 0.0848741f,    0.063445f})); //batch 1
2308     return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
2309             workloadFactory, memoryManager, input, expectedOutput);
2310 }
2311
2312
2313 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
2314     armnn::IWorkloadFactory& workloadFactory,
2315     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2316 {
2317     const float qScale = 1.0f;
2318     const int32_t qOffset = 0;
2319
2320     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2321     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2322
2323     armnn::TensorInfo inputDesc({2, 2}, datatype);
2324     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2325             std::vector<float>{2., 3., 3., 4.}));
2326
2327     armnn::TensorInfo outputDesc({2, 4}, datatype);
2328     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2329             qOffset, std::vector<float>({{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
2330                                           -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f}})));
2331
2332     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2333         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2334
2335 }
2336
2337 LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
2338     armnn::IWorkloadFactory& workloadFactory,
2339     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2340 {
2341     const float qScale = 1.0f;
2342     const int32_t qOffset = 0;
2343
2344     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2345     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2346
2347     armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
2348     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2349             std::vector<float>({ 2., 3., 3., 4. })));
2350
2351     armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
2352     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2353             qOffset, std::vector<float>(
2354             {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2355              -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
2356
2357     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2358         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2359 }
2360
2361 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
2362     armnn::IWorkloadFactory& workloadFactory,
2363     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2364 {
2365     const float qScale = 2.0f;
2366     const int32_t qOffset = 0;
2367
2368     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2369     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2370
2371     armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
2372     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2373             qOffset, std::vector<float>(
2374             {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2375              0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
2376
2377     armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
2378     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2379             qOffset, std::vector<float>(
2380             {-0.00396806f,  0.029352f,   -0.00279226f, 0.0159977f,  -0.00835576f,
2381              -0.0211779f,   0.0283512f,  -0.0114597f,  0.00907307f, -0.0244004f,
2382              -0.0152191f,  -0.0259063f,   0.00914318f, 0.00415118f,  0.017147f,
2383               0.0134203f,  -0.013869f,    0.0287268f, -0.00334693f,  0.00733398f, -0.0287926f,
2384              -0.0186926f,   0.0193662f,  -0.0115437f,  0.00422612f, -0.0345232f,
2385               0.00223253f, -0.00957321f,  0.0210624f,  0.013331f,    0.0150954f,   0.02168f})));
2386
2387     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
2388         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2389 }
2390
2391 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2392     armnn::IWorkloadFactory& workloadFactory,
2393     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2394 {
2395     const float qScale = 1.0f;
2396     const int32_t qOffset = 0;
2397
2398     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2399
2400     armnn::TensorInfo inputDesc({2, 2}, datatype);
2401     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2402             qOffset, std::vector<float>{2., 3., 3., 4.}));
2403
2404     armnn::TensorInfo outputDesc({2, 4}, datatype);
2405     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2406             qOffset, std::vector<float>({{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
2407                                           -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f}})));
2408
2409     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2410         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2411 }
2412
2413 // QuantizedLstm
2414 LayerTestResult<uint8_t, 2> QuantizedLstmTest(
2415     armnn::IWorkloadFactory& workloadFactory,
2416     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2417 {
2418     armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
2419     boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
2420         {166, 179, 50, 150}));
2421
2422     armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
2423     boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
2424         {140, 151, 146, 112, 136, 156, 142, 112 }));
2425
2426     return QuantizedLstmTestImpl(workloadFactory, memoryManager, input, expectedOutput);
2427 }
2428
2429 LayerTestResult<float,3> ConcatTest(
2430     armnn::IWorkloadFactory& workloadFactory,
2431     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2432 {
2433     unsigned int outputWidth = 3;
2434     unsigned int outputHeight = 6;
2435     unsigned int outputChannels = 3;
2436
2437     unsigned int inputWidth1 = 3;
2438     unsigned int inputHeight1 = 6;
2439     unsigned int inputChannels1 = 2;
2440
2441     unsigned int inputWidth2 = 3;
2442     unsigned int inputHeight2 = 6;
2443     unsigned int inputChannels2 = 1;
2444
2445     // Define the tensor descriptors.
2446     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
2447     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
2448     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
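    // Concatenation happens along the channel axis: the two channels of input1 followed by
    // the single channel of input2 make up the three output channels.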
2449
2450     LayerTestResult<float,3> ret(outputTensorInfo);
2451
2452     ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2453     {
2454             1.0f, 2.0f, 3.0f,
2455             4.0f, 5.0f, 6.0f,
2456             7.0f, 8.0f, 9.0f,
2457             10.0f, 11.0f, 12.0f,
2458             13.0f, 14.0f, 15.0f,
2459             16.0f, 17.0f, 18.0f,
2460
2461             19.0f, 20.0f, 21.0f,
2462             22.0f, 23.0f, 24.0f,
2463             25.0f, 26.0f, 27.0f,
2464             28.0f, 29.0f, 30.0f,
2465             31.0f, 32.0f, 33.0f,
2466             34.0f, 35.0f, 36.0f,
2467
2468             37.0f, 38.0f, 39.0f,
2469             40.0f, 41.0f, 42.0f,
2470             43.0f, 44.0f, 45.0f,
2471             46.0f, 47.0f, 48.0f,
2472             49.0f, 50.0f, 51.0f,
2473             52.0f, 53.0f, 54.0f,
2474         })
2475     );
2476
2477     auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2478         {
2479             1.0f, 2.0f, 3.0f,
2480             4.0f, 5.0f, 6.0f,
2481             7.0f, 8.0f, 9.0f,
2482             10.0f, 11.0f, 12.0f,
2483             13.0f, 14.0f, 15.0f,
2484             16.0f, 17.0f, 18.0f,
2485
2486             19.0f, 20.0f, 21.0f,
2487             22.0f, 23.0f, 24.0f,
2488             25.0f, 26.0f, 27.0f,
2489             28.0f, 29.0f, 30.0f,
2490             31.0f, 32.0f, 33.0f,
2491             34.0f, 35.0f, 36.0f,
2492         })
2493     );
2494
2495     auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2496         {
2497             37.0f, 38.0f, 39.0f,
2498             40.0f, 41.0f, 42.0f,
2499             43.0f, 44.0f, 45.0f,
2500             46.0f, 47.0f, 48.0f,
2501             49.0f, 50.0f, 51.0f,
2502             52.0f, 53.0f, 54.0f,
2503         })
2504     );
2505
2506     std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // The extent of the window is defined by the size of input[0].
2507     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2508
2509     std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Origin {2, 0, 0} places input[1] two channels into the output.
2510     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
2511
2512     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2513
2514     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
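    // Where the backend supports sub-tensors, each input handle aliases a window of the output
    // tensor directly; otherwise standalone input handles are created and the concat workload
    // copies the data into the output.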
2515
2516     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
2517         subTensorsSupported ?
2518             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2519             workloadFactory.CreateTensorHandle(inputTensorInfo1);
2520
2521     std::unique_ptr<armnn::ITensorHandle> inputHandle2  =
2522         subTensorsSupported ?
2523             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2524             workloadFactory.CreateTensorHandle(inputTensorInfo2);
2525
2526     armnn::ConcatQueueDescriptor data;
2527     armnn::WorkloadInfo info;
2528     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2529     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2530     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2531
2532     data.m_ViewOrigins.push_back(window1);
2533     data.m_ViewOrigins.push_back(window2);
2534
2535     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
2536
2537     inputHandle1->Allocate();
2538     inputHandle2->Allocate();
2539     outputHandle->Allocate();
2540
2541     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2542     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2543
2544     workload->PostAllocationConfigure();
2545     workload->Execute();
2546
2547     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2548
2549     return ret;
2550 }
2551
2552 LayerTestResult<float,4> AdditionTest(
2553     armnn::IWorkloadFactory& workloadFactory,
2554     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2555 {
2556     unsigned int batchSize = 2;
2557     unsigned int channels  = 2;
2558     unsigned int height    = 2;
2559     unsigned int width     = 3;
2560
2561     armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2562     armnn::TensorInfo outputTensorInfo;
2563
2564     unsigned int shape[] = {batchSize, channels, height, width};
2565
2566     inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2567     inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2568     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2571     auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
2572         {
2573             0.0f, 2.0f, 1.0f,
2574             0.2f, 1.0f, 2.0f,
2575
2576             1.0f, 2.0f, 1.0f,
2577             0.2f, 1.0f, 2.0f,
2578
2579             0.0f, 2.0f, 1.0f,
2580             4.2f, 1.0f, 2.0f,
2581
2582             0.0f, 0.0f, 1.0f,
2583             0.2f, 1.0f, 2.0f,
2584         }));
2585
2586     auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
2587         {
2588             1.0f, 2.0f, 1.0f,
2589             0.0f, 1.0f, 2.0f,
2590
2591             1.0f, 2.0f, -2.0f,
2592             0.2f, 1.0f, 2.0f,
2593
2594             0.0f, 2.0f, 1.0f,
2595             4.2f, 0.0f, -3.0f,
2596
2597             0.0f, 0.0f, 1.0f,
2598             0.7f, 1.0f, 5.0f,
2599         }));
2600
2601     LayerTestResult<float,4> ret(outputTensorInfo);
2602     ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
2603         {
2604             1.0f, 4.0f, 2.0f,
2605             0.2f, 2.0f, 4.0f,
2606
2607             2.0f, 4.0f, -1.0f,
2608             0.4f, 2.0f, 4.0f,
2609
2610             0.0f, 4.0f, 2.0f,
2611             8.4f, 1.0f, -1.0f,
2612
2613             0.0f, 0.0f, 2.0f,
2614             0.9f, 2.0f, 7.0f,
2615         }));
2616
2617     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2618     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2619     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2620
2621     armnn::AdditionQueueDescriptor data;
2622     armnn::WorkloadInfo info;
2623     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2624     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2625     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2626
2627     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2628
2629     inputHandle1->Allocate();
2630     inputHandle2->Allocate();
2631     outputHandle->Allocate();
2632
2633     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2634     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2635
2636     workload->PostAllocationConfigure();
2637     workload->Execute();
2638
2639     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2640
2641     return ret;
2642 }
2643
2644 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2645 LayerTestResult<T, 4> AdditionBroadcastTestImpl(
2646     armnn::IWorkloadFactory& workloadFactory,
2647     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2648     float qScale,
2649     int32_t qOffset)
2650 {
2651     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
2652     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
2653     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2654
2655     if (armnn::IsQuantizedType<T>())
2656     {
2657         inputTensorInfo1.SetQuantizationScale(qScale);
2658         inputTensorInfo1.SetQuantizationOffset(qOffset);
2659         inputTensorInfo2.SetQuantizationScale(qScale);
2660         inputTensorInfo2.SetQuantizationOffset(qOffset);
2661         outputTensorInfo.SetQuantizationScale(qScale);
2662         outputTensorInfo.SetQuantizationOffset(qOffset);
2663     }
2664
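    // QuantizedVector is expected to apply the affine mapping
    // q = round(v / qScale) + qOffset to the float literals below when T is a
    // quantized type, and to pass the values through unchanged for float types.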
2665     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2666         {
2667             0.0f,
2668             1.0f,
2669
2670             2.0f,
2671             3.0f,
2672
2673             4.0f,
2674             5.0f,
2675         }));
2676
2677     auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2678         {
2679             0.5f, 1.5f, 2.5f,
2680             3.5f, 4.5f, 5.5f,
2681         }));
2682
2683     LayerTestResult<T,4> ret(outputTensorInfo);
2684     ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2685         {
2686             0.5f, 1.5f, 2.5f,
2687             4.5f, 5.5f, 6.5f,
2688
2689             2.5f, 3.5f, 4.5f,
2690             6.5f, 7.5f, 8.5f,
2691
2692             4.5f, 5.5f, 6.5f,
2693             8.5f, 9.5f, 10.5f,
2694         }));
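    // Broadcasting recap for the expected values above: input1 {1, 3, 2, 1} and
    // input2 {1, 1, 2, 3} are each stretched along their size-1 dimensions to the
    // output shape {1, 3, 2, 3}. For example,
    // output[0][1][1][2] = input1[0][1][1][0] + input2[0][0][1][2]
    //                    = 3.0f + 5.5f = 8.5f.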
2695
2696     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2697     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2698     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2699
2700     armnn::AdditionQueueDescriptor data;
2701     armnn::WorkloadInfo info;
2702     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2703     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2704     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2705
2706     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2707
2708     inputHandle1->Allocate();
2709     inputHandle2->Allocate();
2710     outputHandle->Allocate();
2711
2712     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2713     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2714
2715     workload->PostAllocationConfigure();
2716     workload->Execute();
2717
2718     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2719
2720     return ret;
2721 }
2722
2723 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2724 LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
2725     armnn::IWorkloadFactory& workloadFactory,
2726     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2727     float qScale,
2728     int32_t qOffset)
2729 {
2730     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2731     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
2732     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2733
2734     if (armnn::IsQuantizedType<T>())
2735     {
2736         inputTensorInfo1.SetQuantizationScale(qScale);
2737         inputTensorInfo1.SetQuantizationOffset(qOffset);
2738         inputTensorInfo2.SetQuantizationScale(qScale);
2739         inputTensorInfo2.SetQuantizationOffset(qOffset);
2740         outputTensorInfo.SetQuantizationScale(qScale);
2741         outputTensorInfo.SetQuantizationOffset(qOffset);
2742     }
2743
2744     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2745         {
2746              0.0f,  1.0f,  2.0f,
2747              3.0f,  4.0f,  5.0f,
2748              6.0f,  7.0f,  8.0f,
2749              9.0f, 10.0f, 11.0f,
2750             12.0f, 13.0f, 14.0f,
2751             15.0f, 16.0f, 17.0f,
2752         }));
2753
2754     auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2755         {
2756             0.5f,
2757         }));
2758
2759     LayerTestResult<T,4> ret(outputTensorInfo);
2760     ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2761         {
2762              0.5f,  1.5f,  2.5f,
2763              3.5f,  4.5f,  5.5f,
2764              6.5f,  7.5f,  8.5f,
2765              9.5f, 10.5f, 11.5f,
2766             12.5f, 13.5f, 14.5f,
2767             15.5f, 16.5f, 17.5f,
2768         }));
2769
2770     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2771     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2772     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2773
2774     armnn::AdditionQueueDescriptor data;
2775     armnn::WorkloadInfo info;
2776     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2777     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2778     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2779
2780     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2781
2782     inputHandle1->Allocate();
2783     inputHandle2->Allocate();
2784     outputHandle->Allocate();
2785
2786     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2787     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2788
2789     workload->PostAllocationConfigure();
2790     workload->Execute();
2791
2792     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2793
2794     return ret;
2795 }
2796
2797 LayerTestResult<float, 4> AdditionBroadcastTest(
2798     armnn::IWorkloadFactory& workloadFactory,
2799     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2800 {
2801     return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2802         workloadFactory, memoryManager, 0.0f, 0);
2803 }
2804
2805 LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2806     armnn::IWorkloadFactory& workloadFactory,
2807     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2808 {
2809     return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2810         workloadFactory, memoryManager, 2.f, 0);
2811 }
2812
2813 LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2814     armnn::IWorkloadFactory& workloadFactory,
2815     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2816 {
2817     return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2818         workloadFactory, memoryManager, 2.f, 0);
2819 }
2820
2821 LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2822     armnn::IWorkloadFactory& workloadFactory,
2823     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2824 {
2825     return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2826         workloadFactory, memoryManager, 0.0f, 0);
2827 }
2828
2829 LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2830     armnn::IWorkloadFactory& workloadFactory,
2831     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2832 {
2833     return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2834         workloadFactory, memoryManager, 0.1333333f, 128);
2835 }
2836
2837 LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2838     armnn::IWorkloadFactory& workloadFactory,
2839     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2840 {
2841     return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2842         workloadFactory, memoryManager, 0.1333333f, 0);
2843 }
2844
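// Runs the same randomly generated addition through the factory under test and
// through a reference factory: ret.output receives the backend result and
// ret.outputExpected the reference result, so the caller's comparison checks
// the backend against the reference implementation.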
2845 LayerTestResult<float,4> CompareAdditionTest(
2846     armnn::IWorkloadFactory& workloadFactory,
2847     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2848     armnn::IWorkloadFactory& refWorkloadFactory)
2849 {
2850     unsigned int batchSize = 4;
2851     unsigned int channels  = 1;
2852     unsigned int height    = 2;
2853     unsigned int width     = 3;
2854
2855     armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2856     armnn::TensorInfo outputTensorInfo;
2857
2858     unsigned int shape[] = {batchSize, channels, height, width};
2859
2860     inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2861     inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2862     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2863
2864     auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2865     auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2866
2867     LayerTestResult<float,4> ret(outputTensorInfo);
2868
2869     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2870     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2871     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2872
2873     std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2874     std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2875     std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2876
2877     armnn::AdditionQueueDescriptor data;
2878     armnn::WorkloadInfo info;
2879     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2880     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2881     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2882
2883     armnn::AdditionQueueDescriptor refData = data;
2884     armnn::WorkloadInfo refInfo = info;
2885     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2886     SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2887     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2888
2889     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2890     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2891
2892     inputHandle1->Allocate();
2893     inputHandle2->Allocate();
2894     outputHandle->Allocate();
2895     inputHandle1Ref->Allocate();
2896     inputHandle2Ref->Allocate();
2897     outputHandleRef->Allocate();
2898
2899     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2900     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2901     CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2902     CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
2903
2904     workload->PostAllocationConfigure();
2905     workload->Execute();
2906     workloadRef->PostAllocationConfigure();
2907     workloadRef->Execute();
2908
2909     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2910     CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
2911
2912     return ret;
2913 }
2914
2915 namespace {
2916 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2917 LayerTestResult<T, 4> DivisionTestHelper(
2918     armnn::IWorkloadFactory& workloadFactory,
2919     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2920     const unsigned int shape0[4],
2921     const std::vector<T>& values0,
2922     float scale0,
2923     int32_t offset0,
2924     const unsigned int shape1[4],
2925     const std::vector<T> & values1,
2926     float scale1,
2927     int32_t offset1,
2928     const unsigned int outShape[4],
2929     const std::vector<T> & outValues,
2930     float outScale,
2931     int32_t outOffset)
2932 {
2933     armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
2934     armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
2935     armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
2936
2937     inputTensorInfo0.SetQuantizationScale(scale0);
2938     inputTensorInfo0.SetQuantizationOffset(offset0);
2939
2940     inputTensorInfo1.SetQuantizationScale(scale1);
2941     inputTensorInfo1.SetQuantizationOffset(offset1);
2942
2943     outputTensorInfo.SetQuantizationScale(outScale);
2944     outputTensorInfo.SetQuantizationOffset(outOffset);
2945
2946     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
2947     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
2948
2949     LayerTestResult<T, 4> result(outputTensorInfo);
2950     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
2951
2952     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2953     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2954     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2955
2956     armnn::DivisionQueueDescriptor data;
2957     armnn::WorkloadInfo info;
2958     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
2959     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
2960     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2961
2962     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
2963
2964     inputHandle0->Allocate();
2965     inputHandle1->Allocate();
2966     outputHandle->Allocate();
2967
2968     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2969     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2970
2971     workload->PostAllocationConfigure();
2972     workload->Execute();
2973
2974     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
2975
2976     return result;
2977 }
2978 } // anonymous namespace
2979
2980 LayerTestResult<float,4> DivisionByZeroTest(
2981     armnn::IWorkloadFactory& workloadFactory,
2982     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2983 {
2984     const unsigned int width = 2;
2985     const unsigned int height = 2;
2986     const unsigned int channelCount = 2;
2987     const unsigned int batchSize = 2;
2988
2989     unsigned int shape[] = { batchSize, channelCount, height, width };
2990
2991     std::vector<float> input0({
2992                                 1.f,  1.f,  1.f,  1.f,  0.f, 0.f, 0.f, 0.f,
2993                                -1.f, -1.f, -1.f, -1.f,  5.f, 5.f, 5.f, 5.f });
2994
2995     std::vector<float> input1({
2996                                0.f, 0.f, -0.f, -0.f,  0.f, 0.f, -0.f, -0.f,
2997                                0.f, 0.f, -0.f, -0.f,  5.f, 5.f,  5.f,  5.f });
2998
2999     std::vector<float> output({
3000                                INFINITY, INFINITY, -INFINITY, -INFINITY,  NAN, NAN, -NAN, -NAN,
3001                                -INFINITY, -INFINITY, INFINITY, INFINITY,  1, 1, 1, 1 });
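    // The expected values follow IEEE 754: dividing a finite non-zero value by
    // zero yields an infinity whose sign combines the operand signs
    // (1/±0 = ±inf, -1/±0 = ∓inf), 0/±0 yields NaN, and 5/5 = 1.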
3002
3003     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3004                                                         memoryManager,
3005                                                         shape, input0, 1.0f, 0,
3006                                                         shape, input1, 1.0f, 0,
3007                                                         shape, output, 1.0f, 0);
3008 }
3009
3010 LayerTestResult<float,4> DivisionTest(
3011     armnn::IWorkloadFactory& workloadFactory,
3012     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3013 {
3014     const unsigned int width = 2;
3015     const unsigned int height = 2;
3016     const unsigned int channelCount = 2;
3017     const unsigned int batchSize = 2;
3018
3019     unsigned int shape[] = { batchSize, channelCount, height, width };
3020
3021     std::vector<float> input0({
3022                                       2,  2,  2,  2,    3,  3,  3,  3,
3023                                       4,  4,  4,  4,    5,  5,  5,  5 });
3024
3025     std::vector<float> input1({
3026                                       1,  1,  1,  1,    2,  2,  2,  2,
3027                                       4,  4,  4,  4,    4,  4,  4,  4 });
3028
3029     std::vector<float> output({
3030                                       2,  2,  2,  2,    1.5,  1.5,  1.5,  1.5,
3031                                       1, 1, 1, 1,  1.25, 1.25, 1.25, 1.25 });
3034     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3035                                                         memoryManager,
3036                                                         shape, input0, 1.0f, 0,
3037                                                         shape, input1, 1.0f, 0,
3038                                                         shape, output, 1.0f, 0);
3039 }
3040
3041 LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
3042     armnn::IWorkloadFactory& workloadFactory,
3043     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3044 {
3045     unsigned int shape0[] = { 1, 2, 2, 2 };
3046     std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3047
3048     unsigned int shape1[] = { 1, 1, 1, 1 };
3049     std::vector<float> input1({ 2 });
3050
3051     std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3054     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3055                                                         memoryManager,
3056                                                         shape0, input0, 1.0f, 0,
3057                                                         shape1, input1, 1.0f, 0,
3058                                                         shape0, output, 1.0f, 0);
3059 }
3060
3061 LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
3062     armnn::IWorkloadFactory& workloadFactory,
3063     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3064 {
3065     unsigned int shape0[] = { 1, 3, 3, 2 };
3066     std::vector<float> input0({
3067                                       1,   4,       3,  8,      5, 12,
3068                                       7,   16,      9, 20,     11, 24,
3069                                       13,  28,     15, 32,     17, 36});
3070
3071     unsigned int shape1[] = { 1, 1, 1, 2 };
3072     std::vector<float> input1({ 1, 2 });
3073
3074     std::vector<float> output({
3075                                       1,   2,      3,  4,      5,  6,
3076                                       7,   8,      9, 10,     11, 12,
3077                                       13, 14,     15, 16,     17, 18});
3078
3079     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3080                                                         memoryManager,
3081                                                         shape0, input0, 1.0f, 0,
3082                                                         shape1, input1, 1.0f, 0,
3083                                                         shape0, output, 1.0f, 0);
3084 }
3085
3086 LayerTestResult<uint8_t,4> DivisionUint8Test(
3087     armnn::IWorkloadFactory& workloadFactory,
3088     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3089 {
3090     const unsigned int width = 2;
3091     const unsigned int height = 2;
3092     const unsigned int channelCount = 2;
3093     const unsigned int batchSize = 2;
3094
3095     unsigned int shape[] = { batchSize, channelCount, height, width };
3096
3097     std::vector<uint8_t> input0({2,  2,  2,  2,    3,  3,  3,  3,
3098                                  4,  4,  4,  4,    5,  5,  5,  5 });
3099
3100     std::vector<uint8_t> input1({1,  1,  1,  1,    2,  2,  2,  2,
3101                                  4,  4,  4,  4,    4,  4,  4,  4 });
3102
3103     std::vector<uint8_t> output({8,  8,  8,  8,    6,  6,  6,  6,
3104                                  4,  4,  4,  4,    5,  5,  5,  5});
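    // With both input scales at 1.0 and zero offsets, the real quotients are
    // 2.0, 1.5, 1.0 and 1.25; requantizing with the output scale of 0.25
    // (q = real / scale) gives the expected 8, 6, 4 and 5 above.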
3107     return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3108                                                                 memoryManager,
3109                                                                 shape, input0, 1.0f,  0,
3110                                                                 shape, input1, 1.0f,  0,
3111                                                                 shape, output, 0.25f, 0);
3112 }
3113
3114 LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
3115     armnn::IWorkloadFactory& workloadFactory,
3116     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3117 {
3118     unsigned int shape0[] = { 1, 2, 2, 2 };
3119     std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3120
3121     unsigned int shape1[] = { 1, 1, 1, 1 };
3122     std::vector<uint8_t> input1({ 2 });
3123
3124     std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3125
3126     return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3127                                                                 memoryManager,
3128                                                                 shape0, input0, 1.0f, 0,
3129                                                                 shape1, input1, 1.0f, 0,
3130                                                                 shape0, output, 1.0f, 0);
3131 }
3132
3133 LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
3134     armnn::IWorkloadFactory& workloadFactory,
3135     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3136 {
3137     unsigned int shape0[] = { 1, 3, 3, 2 };
3138     std::vector<uint8_t> input0({1,   4,     3,  8,      5,  12,
3139                                  7,   16,    9,  20,     11, 24,
3140                                  13,  28,    15, 32,     17, 36});
3141
3142     unsigned int shape1[] = { 1, 1, 1, 2 };
3143     std::vector<uint8_t> input1({ 1, 2 });
3144
3145     std::vector<uint8_t> output({1,   2,      3,  4,      5,  6,
3146                                  7,   8,      9, 10,     11, 12,
3147                                  13, 14,     15, 16,     17, 18});
3148
3149     return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3150                                                                 memoryManager,
3151                                                                 shape0, input0, 1.0f, 0,
3152                                                                 shape1, input1, 1.0f, 0,
3153                                                                 shape0, output, 1.0f, 0);
3154 }
3155
3156 LayerTestResult<int16_t,4> DivisionInt16Test(
3157     armnn::IWorkloadFactory& workloadFactory,
3158     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3159 {
3160     unsigned int shape[] = { 2, 2, 2, 2 };
3161
3162     std::vector<int16_t> input0({2,  2,  2,  2,    3,  3,  3,  3,
3163                                  4,  4,  4,  4,    5,  5,  5,  5 });
3164
3165     std::vector<int16_t> input1({1,  1,  1,  1,    2,  2,  2,  2,
3166                                  4,  4,  4,  4,    4,  4,  4,  4 });
3167
3168     std::vector<int16_t> output({8,  8,  8,  8,    6,  6,  6,  6,
3169                                  4,  4,  4,  4,    5,  5,  5,  5});
3172     return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3173                                                                 memoryManager,
3174                                                                 shape, input0, 1.0f,  0,
3175                                                                 shape, input1, 1.0f,  0,
3176                                                                 shape, output, 0.25f, 0);
3177 }
3178
3179 LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
3180     armnn::IWorkloadFactory& workloadFactory,
3181     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3182 {
3183     unsigned int shape0[] = { 1, 2, 2, 2 };
3184     std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3185
3186     unsigned int shape1[] = { 1, 1, 1, 1 };
3187     std::vector<int16_t> input1({ 2 });
3188
3189     std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3190
3191     return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3192                                                                 memoryManager,
3193                                                                 shape0, input0, 1.0f, 0,
3194                                                                 shape1, input1, 1.0f, 0,
3195                                                                 shape0, output, 1.0f, 0);
3196 }
3197
3198 LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
3199     armnn::IWorkloadFactory& workloadFactory,
3200     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3201 {
3202     unsigned int shape0[] = { 1, 3, 3, 2 };
3203     std::vector<int16_t> input0({1,   4,     3,  8,      5,  12,
3204                                  7,   16,    9,  20,     11, 24,
3205                                  13,  28,    15, 32,     17, 36});
3206
3207     unsigned int shape1[] = { 1, 1, 1, 2 };
3208     std::vector<int16_t> input1({ 1, 2 });
3209
3210     std::vector<int16_t> output({1,   2,      3,  4,      5,  6,
3211                                  7,   8,      9, 10,     11, 12,
3212                                  13, 14,     15, 16,     17, 18});
3213
3214     return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3215                                                                 memoryManager,
3216                                                                 shape0, input0, 1.0f, 0,
3217                                                                 shape1, input1, 1.0f, 0,
3218                                                                 shape0, output, 1.0f, 0);
3219 }
3220
// Dispatch helper for the elementwise tests below: the primary template is
// declared but deliberately left undefined, so a descriptor type without an
// explicit specialization fails at link time rather than compiling into a
// self-recursive call.
template<typename DescriptorType>
std::unique_ptr<armnn::IWorkload> CreateWorkload(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const DescriptorType& descriptor);
3229
3230 template<>
3231 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
3232     const armnn::IWorkloadFactory& workloadFactory,
3233     const armnn::WorkloadInfo& info,
3234     const armnn::MaximumQueueDescriptor& descriptor)
3235 {
3236     return workloadFactory.CreateMaximum(descriptor, info);
3237 }
3238
3239 template<>
3240 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
3241     const armnn::IWorkloadFactory& workloadFactory,
3242     const armnn::WorkloadInfo& info,
3243     const armnn::MinimumQueueDescriptor& descriptor)
3244 {
3245     return workloadFactory.CreateMinimum(descriptor, info);
3246 }
3247
3248 template<>
3249 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
3250         const armnn::IWorkloadFactory& workloadFactory,
3251         const armnn::WorkloadInfo& info,
3252         const armnn::EqualQueueDescriptor& descriptor)
3253 {
3254     return workloadFactory.CreateEqual(descriptor, info);
3255 }
3256
3257 template<>
3258 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
3259         const armnn::IWorkloadFactory& workloadFactory,
3260         const armnn::WorkloadInfo& info,
3261         const armnn::GreaterQueueDescriptor& descriptor)
3262 {
3263     return workloadFactory.CreateGreater(descriptor, info);
3264 }
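// Each specialization above simply forwards to the matching factory method.
// Extending the pattern to another binary layer takes one more specialization;
// as a sketch only (LessQueueDescriptor and CreateLess are assumed here for
// illustration, not guaranteed by this version of the API):
//
// template<>
// std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::LessQueueDescriptor>(
//     const armnn::IWorkloadFactory& workloadFactory,
//     const armnn::WorkloadInfo& info,
//     const armnn::LessQueueDescriptor& descriptor)
// {
//     return workloadFactory.CreateLess(descriptor, info);
// }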
3265
3266 namespace {
3267
3268 template <typename Descriptor,
3269           armnn::DataType ArmnnTypeInput,
3270           armnn::DataType ArmnnTypeOutput,
3271           typename TInput = armnn::ResolveType<ArmnnTypeInput>,
3272           typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
3273 LayerTestResult<TOutput, 4> ElementwiseTestHelper(
3274     armnn::IWorkloadFactory & workloadFactory,
3275     const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
3276     const unsigned int shape0[4], std::vector<TInput> values0,
3277     const unsigned int shape1[4], std::vector<TInput> values1,
3278     const unsigned int outShape[4], std::vector<TOutput> outValues,
3279     float qScale = 0.0f, int qOffset = 0)
3280 {
3281     const uint32_t dimensionCount = 4;
3282     armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
3283     armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
3284     armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
3285
3286     auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
3287     auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
3288
3289     if (armnn::IsQuantizedType<TInput>())
3290     {
3291         inputTensorInfo0.SetQuantizationScale(qScale);
3292         inputTensorInfo0.SetQuantizationOffset(qOffset);
3293
3294         inputTensorInfo1.SetQuantizationScale(qScale);
3295         inputTensorInfo1.SetQuantizationOffset(qOffset);
3296
3297         outputTensorInfo.SetQuantizationScale(qScale);
3298         outputTensorInfo.SetQuantizationOffset(qOffset);
3299     }
3300
3301     LayerTestResult<TOutput,4> ret(outputTensorInfo);
3302
    if (ArmnnTypeOutput == armnn::DataType::Boolean)
3304     {
3305         ret.compareBoolean = true;
3306     }
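    // Boolean results are transported as uint8_t values, so flagging
    // compareBoolean here lets the caller's tensor comparison treat the output
    // logically (zero/non-zero) rather than as exact numeric values.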
3307
3308     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3309     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3310     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3311
3312     Descriptor data;
3313     armnn::WorkloadInfo info;
3314     AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3315     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3316     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3317     auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
3318
3319     inputHandle0->Allocate();
3320     inputHandle1->Allocate();
3321     outputHandle->Allocate();
3322
3323     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3324     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3325
3326     workload->PostAllocationConfigure();
3327     ExecuteWorkload(*workload, memoryManager);
3328
3329     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3330
3331     ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
3332     return ret;
3333 }
3334
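// Convenience overload for the common case where the inputs and output share
// one data type; it forwards to the two-type helper above with
// ArmnnTypeOutput == ArmnnTypeInput.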
3335 template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
3336 LayerTestResult<T, 4> ElementwiseTestHelper(
3337     armnn::IWorkloadFactory & workloadFactory,
3338     const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
3339     const unsigned int shape0[4], std::vector<T> values0,
3340     const unsigned int shape1[4], std::vector<T> values1,
3341     const unsigned int outShape[4], std::vector<T> outValues,
3342     float qScale = 0.0f, int qOffset = 0)
3343 {
3344     return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
3345         (workloadFactory,
3346          memoryManager,
3347          shape0,
3348          values0,
3349          shape1,
3350          values1,
3351          outShape,
3352          outValues,
3353          qScale,
3354          qOffset);
3355 }
} // anonymous namespace
3357
3358 LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3359                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3360 {
3361     const unsigned int width = 2;
3362     const unsigned int height = 2;
3363     const unsigned int channelCount = 2;
3364     const unsigned int batchSize = 2;
3365
3366     unsigned int shape[] = { batchSize, channelCount, height, width };
3367
3368     std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
3369                                 3, 3, 3, 3,  4, 4, 4, 4 });
3370
3371     std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
3372                                 5, 5, 5, 5,  4, 4, 4, 4 });
3373
3374     std::vector<uint8_t> output({ 1, 1, 1, 1,  0, 0, 0, 0,
3375                                   0, 0, 0, 0,  1, 1, 1, 1 });
3376
3377     return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3378         workloadFactory,
3379         memoryManager,
3380         shape,
3381         input0,
3382         shape,
3383         input1,
3384         shape,
3385         output);
3386 }
3387
3388 LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
3389         armnn::IWorkloadFactory& workloadFactory,
3390         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3391 {
3392     unsigned int shape0[] = { 1, 2, 2, 2 };
3393     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3394
3395     unsigned int shape1[] = { 1, 1, 1, 1 };
3396     std::vector<float> input1({ 1 });
3397
3398     std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
3399
3400     return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3401         workloadFactory,
3402         memoryManager,
3403         shape0,
3404         input0,
3405         shape1,
3406         input1,
3407         shape0,
3408         output);
3409 }
3410
3411 LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
3412         armnn::IWorkloadFactory& workloadFactory,
3413         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3414 {
3415     const unsigned int shape0[] = { 1, 2, 2, 3 };
3416     const unsigned int shape1[] = { 1, 1, 1, 3 };
3417
3418     std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3419                                 7, 8, 9, 10, 11, 12 });
3420
3421     std::vector<float> input1({ 1, 2, 3});
3422
3423     std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
3424                                   0, 0, 0, 0, 0, 0 });
3425
3426     return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3427         workloadFactory,
3428         memoryManager,
3429         shape0,
3430         input0,
3431         shape1,
3432         input1,
3433         shape0,
3434         output);
3435 }
3436
3437 LayerTestResult<uint8_t, 4> EqualUint8Test(
3438         armnn::IWorkloadFactory& workloadFactory,
3439         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3440 {
3441     unsigned int shape[] = { 2, 2, 2, 2 };
3442
    // With a quantization scale of 1.0f and offset 0, the quantized values below are also the dequantized values.
3444     std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3445                                   3, 3, 3, 3, 7, 7, 7, 7 });
3446
3447     std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3448                                   3, 3, 3, 3, 5, 5, 5, 5 });
3449
3450     std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3451                                   1, 1, 1, 1, 0, 0, 0, 0 });
3452
3453     return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3454                                  armnn::DataType::QuantisedAsymm8,
3455                                  armnn::DataType::Boolean>(
3456         workloadFactory,
3457         memoryManager,
3458         shape,
3459         input0,
3460         shape,
3461         input1,
3462         shape,
3463         output,
3464         1.0f,
3465         0);
3466 }
3467
3468 LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3469         armnn::IWorkloadFactory& workloadFactory,
3470         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3471 {
3472     const unsigned int shape0[] = { 1, 2, 2, 3 };
3473     const unsigned int shape1[] = { 1, 1, 1, 1 };
3474
3475     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3476                                   7, 8, 9, 10, 11, 12 });
3477
3478     std::vector<uint8_t> input1({ 1 });
3479
3480     std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3481                                   0, 0, 0, 0, 0, 0 });
3482
3483     return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3484                                  armnn::DataType::QuantisedAsymm8,
3485                                  armnn::DataType::Boolean>(
3486         workloadFactory,
3487         memoryManager,
3488         shape0,
3489         input0,
3490         shape1,
3491         input1,
3492         shape0,
3493         output,
3494         1.0f,
3495         0);
3496 }
3497
3498 LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3499         armnn::IWorkloadFactory& workloadFactory,
3500         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3501 {
3502     const unsigned int shape0[] = { 1, 2, 2, 3 };
3503     const unsigned int shape1[] = { 1, 1, 1, 3 };
3504
3505     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3506                                   7, 8, 9, 10, 11, 12 });
3507
3508     std::vector<uint8_t> input1({ 1, 1, 3});
3509
3510     std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3511                                   0, 0, 0, 0, 0, 0 });
3512
3513     return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3514                                  armnn::DataType::QuantisedAsymm8,
3515                                  armnn::DataType::Boolean>(
3516         workloadFactory,
3517         memoryManager,
3518         shape0,
3519         input0,
3520         shape1,
3521         input1,
3522         shape0,
3523         output,
3524         1.0f,
3525         0);
3526 }
3527
3528 LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3529                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3530 {
3531     const unsigned int width = 2;
3532     const unsigned int height = 2;
3533     const unsigned int channelCount = 2;
3534     const unsigned int batchSize = 2;
3535
3536     unsigned int shape[] = { batchSize, channelCount, height, width };
3537
3538     std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
3539                                 3, 3, 3, 3,  4, 4, 4, 4 });
3540
3541     std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
3542                                 5, 5, 5, 5,  4, 4, 4, 4 });
3543
3544     std::vector<uint8_t> output({ 0, 0, 0, 0,  1, 1, 1, 1,
3545                                   0, 0, 0, 0,  0, 0, 0, 0 });
3546
3547     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3548         workloadFactory,
3549         memoryManager,
3550         shape,
3551         input0,
3552         shape,
3553         input1,
3554         shape,
3555         output);
3556 }
3557
3558 LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
3559         armnn::IWorkloadFactory& workloadFactory,
3560         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3561 {
3562     unsigned int shape0[] = { 1, 2, 2, 2 };
3563     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3564
3565     unsigned int shape1[] = { 1, 1, 1, 1 };
3566     std::vector<float> input1({ 1 });
3567
3568     std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
3569
3570     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3571         workloadFactory,
3572         memoryManager,
3573         shape0,
3574         input0,
3575         shape1,
3576         input1,
3577         shape0,
3578         output);
3579 }
3580
3581 LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
3582         armnn::IWorkloadFactory& workloadFactory,
3583         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3584 {
3585     const unsigned int shape0[] = { 1, 2, 2, 3 };
3586     const unsigned int shape1[] = { 1, 1, 1, 3 };
3587
3588     std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3589                                 7, 8, 9, 10, 11, 12 });
3590
3591     std::vector<float> input1({ 1, 3, 2});
3592
3593     std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3594                                   1, 1, 1, 1, 1, 1 });
3595
3596     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3597         workloadFactory,
3598         memoryManager,
3599         shape0,
3600         input0,
3601         shape1,
3602         input1,
3603         shape0,
3604         output);
3605 }
3606
3607 LayerTestResult<uint8_t, 4> GreaterUint8Test(
3608         armnn::IWorkloadFactory& workloadFactory,
3609         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3610 {
3611     unsigned int shape[] = { 2, 2, 2, 2 };
3612
    // With a quantization scale of 1.0f and offset 0, the quantized values below are also the dequantized values.
3614     std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3615                                   3, 3, 3, 3, 5, 5, 5, 5 });
3616
3617     std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3618                                   2, 2, 2, 2, 5, 5, 5, 5 });
3619
3620     std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3621                                   1, 1, 1, 1, 0, 0, 0, 0 });
3622
3623     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3624                                  armnn::DataType::QuantisedAsymm8,
3625                                  armnn::DataType::Boolean>(
3626         workloadFactory,
3627         memoryManager,
3628         shape,
3629         input0,
3630         shape,
3631         input1,
3632         shape,
3633         output,
3634         1.0f,
3635         0);
3636 }
3637
3638 LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3639         armnn::IWorkloadFactory& workloadFactory,
3640         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3641 {
3642     const unsigned int shape0[] = { 1, 2, 2, 3 };
3643     const unsigned int shape1[] = { 1, 1, 1, 1 };
3644
3645     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3646                                   7, 8, 9, 10, 11, 12 });
3647
3648     std::vector<uint8_t> input1({ 1 });
3649
3650     std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3651                                   1, 1, 1, 1, 1, 1 });
3652
3653     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3654                                  armnn::DataType::QuantisedAsymm8,
3655                                  armnn::DataType::Boolean>(
3656         workloadFactory,
3657         memoryManager,
3658         shape0,
3659         input0,
3660         shape1,
3661         input1,
3662         shape0,
3663         output,
3664         1.0f,
3665         0);
3666 }
3667
3668 LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3669         armnn::IWorkloadFactory& workloadFactory,
3670         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3671 {
3672     const unsigned int shape0[] = { 1, 2, 2, 3 };
3673     const unsigned int shape1[] = { 1, 1, 1, 3 };
3674
3675     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3676                                   7, 8, 9, 10, 11, 12 });
3677
3678     std::vector<uint8_t> input1({ 1, 1, 3});
3679
3680     std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3681                                   1, 1, 1, 1, 1, 1 });
3682
3683     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3684                                  armnn::DataType::QuantisedAsymm8,
3685                                  armnn::DataType::Boolean>(
3686         workloadFactory,
3687         memoryManager,
3688         shape0,
3689         input0,
3690         shape1,
3691         input1,
3692         shape0,
3693         output,
3694         1.0f,
3695         0);
3696 }
3697
3698 LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3699                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3700 {
3701     const unsigned int width = 2;
3702     const unsigned int height = 2;
3703     const unsigned int channelCount = 2;
3704     const unsigned int batchSize = 2;
3705
3706     unsigned int shape[] = { batchSize, channelCount, height, width };
3707
3708     std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
3709                                 3, 3, 3, 3,  4, 4, 4, 4 });
3710
3711     std::vector<float> input1({ 2, 2, 2, 2,  3, 3, 3, 3,
3712                                 4, 4, 4, 4,  5, 5, 5, 5 });
3713
3714     std::vector<float> output({ 2, 2, 2, 2,  5, 5, 5, 5,
3715                                 4, 4, 4, 4,  5, 5, 5, 5 });
3716
3717     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3718         workloadFactory,
3719         memoryManager,
3720         shape,
3721         input0,
3722         shape,
3723         input1,
3724         shape,
3725         output);
3726 }
3727
3728 LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3729         armnn::IWorkloadFactory& workloadFactory,
3730         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3731 {
3732     unsigned int shape0[] = { 1, 2, 2, 2 };
3733     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3734
3735     unsigned int shape1[] = { 1, 1, 1, 1 };
3736     std::vector<float> input1({ 2 });
3737
3738     std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3739
3740     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3741         workloadFactory,
3742         memoryManager,
3743         shape0,
3744         input0,
3745         shape1,
3746         input1,
3747         shape0,
3748         output);
3749 }
3750
3751 LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3752         armnn::IWorkloadFactory& workloadFactory,
3753         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3754 {
3755     const unsigned int shape0[] = { 1, 2, 2, 3 };
3756     const unsigned int shape1[] = { 1, 1, 1, 3 };
3757
3758     std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3759                                   7, 8, 9, 10, 11, 12 });
3760
3761     std::vector<float> input1({ 1, 2, 3});
3762
3763     std::vector<float> output({ 1, 2, 3, 4, 5, 6,
3764                                 7, 8, 9, 10, 11, 12 });
3765
3766     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3767         workloadFactory,
3768         memoryManager,
3769         shape0,
3770         input0,
3771         shape1,
3772         input1,
3773         shape0,
3774         output);
3775 }
3776
3777 LayerTestResult<uint8_t, 4> MaximumUint8Test(
3778         armnn::IWorkloadFactory& workloadFactory,
3779         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3780 {
3781     unsigned int shape[] = { 2, 2, 2, 2 };
3782
    // With a quantization scale of 1.0f and offset 0, the quantized values below are also the dequantized values.
3784     std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3785                                   3, 3, 3, 3, 4, 4, 4, 4 });
3786
3787     std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3788                                   4, 4, 4, 4, 5, 5, 5, 5 });
3789
3790     std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3791                                   4, 4, 4, 4, 5, 5, 5, 5 });
3792
3793     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3794         workloadFactory,
3795         memoryManager,
3796         shape,
3797         input0,
3798         shape,
3799         input1,
3800         shape,
3801         output,
3802         1.0f,
3803         0);
3804 }
3805
3806 LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3807         armnn::IWorkloadFactory& workloadFactory,
3808         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3809 {
3810     const unsigned int shape0[] = { 1, 2, 2, 3 };
3811     const unsigned int shape1[] = { 1, 1, 1, 1 };
3812
3813     std::vector<uint8_t> input0({ 1, 2, 3, 4,  5, 6,
3814                                   7, 8, 9, 10, 11, 12 });
3815
3816     std::vector<uint8_t> input1({2});
3817
3818     std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3819                                   7, 8, 9, 10, 11, 12 });
3820
3821     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3822         workloadFactory,
3823         memoryManager,
3824         shape0,
3825         input0,
3826         shape1,
3827         input1,
3828         shape0,
3829         output,
3830         1.0f,
3831         0);
3832 }
3833
3834 LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3835         armnn::IWorkloadFactory& workloadFactory,
3836         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3837 {
3838     const unsigned int shape0[] = { 1, 2, 2, 3 };
3839     const unsigned int shape1[] = { 1, 1, 1, 3 };
3840
3841     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3842                                   7, 8, 9, 10, 11, 12 });
3843
3844     std::vector<uint8_t> input1({ 1, 10, 3});
3845
3846     std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3847                                   7, 10, 9, 10, 11, 12 });
3848
3849     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3850         workloadFactory,
3851         memoryManager,
3852         shape0,
3853         input0,
3854         shape1,
3855         input1,
3856         shape0,
3857         output,
3858         1.0f,
3859         0);
3860 }
3861
3862 LayerTestResult<int16_t, 4> MaximumInt16Test(
3863     armnn::IWorkloadFactory& workloadFactory,
3864     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3865 {
3866     unsigned int shape[] = { 2, 2, 2, 2 };
3867
3868     std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3869                                   3, 3, 3, 3, 4, 4, 4, 4 });
3870
3871     std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3872                                   4, 4, 4, 4, 5, 5, 5, 5 });
3873
3874     std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3875                                   4, 4, 4, 4, 5, 5, 5, 5 });
3876
3877     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3878         workloadFactory,
3879         memoryManager,
3880         shape,
3881         input0,
3882         shape,
3883         input1,
3884         shape,
3885         output,
3886         1.0f,
3887         0);
3888 }
3889
3890 LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3891     armnn::IWorkloadFactory& workloadFactory,
3892     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3893 {
3894     const unsigned int shape0[] = { 1, 2, 2, 3 };
3895     const unsigned int shape1[] = { 1, 1, 1, 1 };
3896
3897     std::vector<int16_t> input0({ 1, 2, 3, 4,  5, 6,
3898                                   7, 8, 9, 10, 11, 12 });
3899
3900     std::vector<int16_t> input1({2});
3901
3902     std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3903                                   7, 8, 9, 10, 11, 12 });
3904
3905     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3906         workloadFactory,
3907         memoryManager,
3908         shape0,
3909         input0,
3910         shape1,
3911         input1,
3912         shape0,
3913         output,
3914         1.0f,
3915         0);
3916 }
3917
3918 LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3919     armnn::IWorkloadFactory& workloadFactory,
3920     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3921 {
3922     const unsigned int shape0[] = { 1, 2, 2, 3 };
3923     const unsigned int shape1[] = { 1, 1, 1, 3 };
3924
3925     std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3926                                   7, 8, 9, 10, 11, 12 });
3927
3928     std::vector<int16_t> input1({ 1, 10, 3});
3929
3930     std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3931                                   7, 10, 9, 10, 11, 12 });
3932
3933     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3934         workloadFactory,
3935         memoryManager,
3936         shape0,
3937         input0,
3938         shape1,
3939         input1,
3940         shape0,
3941         output,
3942         1.0f,
3943         0);
3944 }
3945
3946 LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3947     armnn::IWorkloadFactory& workloadFactory,
3948     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3949 {
3950     unsigned int shape0[] = { 1, 2, 2, 2 };
3951     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3952
3953     unsigned int shape1[] = { 1, 1, 1, 1 };
3954     std::vector<float> input1({ 2 });
3955
3956     std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3957
3958     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3959         workloadFactory,
3960         memoryManager,
3961         shape0,
3962         input0,
3963         shape1,
3964         input1,
3965         shape0,
3966         output);
3967 }
3970 LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3971     armnn::IWorkloadFactory& workloadFactory,
3972     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3973 {
3974     unsigned int shape0[] = { 1, 2, 2, 2 };
3975     std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3976
3977     unsigned int shape1[] = { 1, 1, 1, 1 };
3978     std::vector<float> input1({ 5 });
3979
3980     std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3981
3982     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3983         workloadFactory,
3984         memoryManager,
3985         shape0,
3986         input0,
3987         shape1,
3988         input1,
3989         shape0,
3990         output);
3991 }
3992
3993 LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3996 {
3997     const unsigned int shape0[] = { 1, 2, 2, 3 };
3998     const unsigned int shape1[] = { 1, 1, 1, 3 };
3999
4000     std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
4001                                   7, 1, 2, 3, 4, 5 });
4002
4003     std::vector<uint8_t> input1({ 1, 2, 3});
4004
4005     std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
4006                                   1, 1, 2, 1, 2, 3 });
4007
4008     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
4009         workloadFactory,
4010         memoryManager,
4011         shape0,
4012         input0,
4013         shape1,
4014         input1,
4015         shape0,
4016         output,
4017         1.0f,
4018         0);
4019 }
4020
4021 LayerTestResult<int16_t, 4> MinimumInt16Test(
4022     armnn::IWorkloadFactory& workloadFactory,
4023     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4024 {
4025     unsigned int shape[] = { 2, 2, 2, 2 };
4026
4027     std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
4028                                   3, 3, 3, 3, 4, 4, 4, 4 });
4029
4030     std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
4031                                   4, 4, 4, 4, 5, 5, 5, 5 });
4032
4033     std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
4034                                   3, 3, 3, 3, 4, 4, 4, 4 });
4035
4036     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4037         workloadFactory,
4038         memoryManager,
4039         shape,
4040         input0,
4041         shape,
4042         input1,
4043         shape,
4044         output,
4045         1.0f,
4046         0);
4047 }
4048
4049 LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
4050     armnn::IWorkloadFactory& workloadFactory,
4051     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4052 {
4053     const unsigned int shape0[] = { 1, 2, 2, 3 };
4054     const unsigned int shape1[] = { 1, 1, 1, 1 };
4055
4056     std::vector<int16_t> input0({ 1, 2, 3, 4,  5, 6,
4057                                   7, 8, 9, 10, 11, 12 });
4058
4059     std::vector<int16_t> input1({2});
4060
4061     std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
4062                                   2, 2, 2, 2, 2, 2 });
4063
4064     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4065         workloadFactory,
4066         memoryManager,
4067         shape0,
4068         input0,
4069         shape1,
4070         input1,
4071         shape0,
4072         output,
4073         1.0f,
4074         0);
4075 }
4076
4077 LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
4078     armnn::IWorkloadFactory& workloadFactory,
4079     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4080 {
4081     const unsigned int shape0[] = { 1, 2, 2, 3 };
4082     const unsigned int shape1[] = { 1, 1, 1, 3 };
4083
4084     std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4085                                   7, 8, 9, 10, 11, 12 });
4086
4087     std::vector<int16_t> input1({ 1, 10, 3});
4088
4089     std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
4090                                   1, 8, 3, 1, 10, 3 });
4091
4092     return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4093         workloadFactory,
4094         memoryManager,
4095         shape0,
4096         input0,
4097         shape1,
4098         input1,
4099         shape0,
4100         output,
4101         1.0f,
4102         0);
4103 }
4104
4105 namespace {
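// Runs a Multiplication workload end-to-end for the given input/output shapes and values,
// returning the actual output alongside the caller-supplied expected values. Broadcasting
// is exercised by passing inputs with different shapes.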
4106 LayerTestResult<float,4> MultiplicationTestHelper(
4107     armnn::IWorkloadFactory& workloadFactory,
4108     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4109     const unsigned int shape0[4],
4110     const std::vector<float> & values0,
4111     const unsigned int shape1[4],
4112     const std::vector<float> & values1,
4113     const unsigned int outShape[4],
4114     const std::vector<float> & outValues)
4115 {
4116     const uint32_t dimensionCount = 4;
4117     armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
4118     armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
4119     armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
4120
4121     auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
4122     auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
4123
4124     LayerTestResult<float,4> ret(outputTensorInfo);
4125
4126     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
4127     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4128     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4129
4130     armnn::MultiplicationQueueDescriptor data;
4131     armnn::WorkloadInfo info;
4132     AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4133     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4134     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4135
4136     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
4137
4138     inputHandle0->Allocate();
4139     inputHandle1->Allocate();
4140     outputHandle->Allocate();
4141
4142     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
4143     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4144
4145     workload->PostAllocationConfigure();
4146     workload->Execute();
4147
4148     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
4149
4150     ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
4151     return ret;
4152 }
4153 } // anonymous namespace
4156 LayerTestResult<float,4> MultiplicationTest(
4157     armnn::IWorkloadFactory& workloadFactory,
4158     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4159 {
4160     const unsigned int width = 2;
4161     const unsigned int height = 2;
4162     const unsigned int channelCount = 2;
4163     const unsigned int batchSize = 2;
4164
4165     unsigned int shape[] = { batchSize, channelCount, height, width };
4166
4167     std::vector<float> input0({
4168         1,  1,  1,  1,    2,  2,  2,  2,
4169         3,  3,  3,  3,    4,  4,  4,  4 });
4170
4171     std::vector<float> input1({
4172         2,  2,  2,  2,    3,  3,  3,  3,
4173         4,  4,  4,  4,    5,  5,  5,  5 });
4174
4175     std::vector<float> output({
4176         2,  2,  2,  2,    6,  6,  6,  6,
4177         12, 12, 12, 12,  20, 20, 20, 20 });
4178
4179     return MultiplicationTestHelper(workloadFactory,
4180                                     memoryManager,
4181                                     shape,
4182                                     input0,
4183                                     shape,
4184                                     input1,
4185                                     shape,
4186                                     output);
4187 }
4188
4189 LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
4190     armnn::IWorkloadFactory& workloadFactory,
4191     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4192 {
4193     unsigned int shape0[] = { 1, 2, 2, 2 };
4194     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
4195
4196     unsigned int shape1[] = { 1, 1, 1, 1 };
4197     std::vector<float> input1({ 2 });
4198
4199     std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
4200
4201     return MultiplicationTestHelper(workloadFactory,
4202                                     memoryManager,
4203                                     shape0,
4204                                     input0,
4205                                     shape1,
4206                                     input1,
4207                                     shape0,
4208                                     output);
4209 }
4210
4211 LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
4212     armnn::IWorkloadFactory& workloadFactory,
4213     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4214 {
4215     unsigned int shape0[] = { 1, 3, 3, 2 };
4216     std::vector<float> input0({
4217         1,   2,      3,  4,      5,  6,
4218         7,   8,      9, 10,     11, 12,
4219         13, 14,     15, 16,     17, 18});
4220
4221     unsigned int shape1[] = { 1, 1, 1, 2 };
4222     std::vector<float> input1({ 1, 2 });
4223
4224     std::vector<float> output({
4225         1,   4,       3,  8,      5, 12,
4226         7,   16,      9, 20,     11, 24,
4227         13,  28,     15, 32,     17, 36});
4228
4229     return MultiplicationTestHelper(workloadFactory,
4230                                     memoryManager,
4231                                     shape0,
4232                                     input0,
4233                                     shape1,
4234                                     input1,
4235                                     shape0,
4236                                     output);
4237 }
4238
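// Runs the same randomly-initialised Multiplication workload through both workloadFactory and
// refWorkloadFactory, returning the two outputs so the backend under test can be checked
// against the reference implementation.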
4239 LayerTestResult<float,4> CompareMultiplicationTest(
4240     armnn::IWorkloadFactory& workloadFactory,
4241     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4242     armnn::IWorkloadFactory& refWorkloadFactory)
4243 {
4244     const unsigned int width = 16;
4245     const unsigned int height = 32;
4246     const unsigned int channelCount = 2;
4247     const unsigned int batchSize = 5;
4248
4249     armnn::TensorInfo inputTensorInfo0;
4250     armnn::TensorInfo inputTensorInfo1;
4251     armnn::TensorInfo outputTensorInfo;
4252
4253     constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
4254
4255     inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4256     inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4257     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4258
4259     LayerTestResult<float,4> comparisonResult(outputTensorInfo);
4260
4261     auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
4262     auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
4263
4264     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
4265     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4266     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4267
4268     std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
4269     std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
4270     std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
4271
4272     armnn::MultiplicationQueueDescriptor data;
4273     armnn::WorkloadInfo info;
4274     AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4275     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4276     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4277
4278     armnn::MultiplicationQueueDescriptor refData = data;
4279     armnn::WorkloadInfo refInfo = info;
4280     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
4281     SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
4282     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
4283
4284     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
4285     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
4286
4287     inputHandle0->Allocate();
4288     inputHandle1->Allocate();
4289     outputHandle->Allocate();
4290     inputHandle0Ref->Allocate();
4291     inputHandle1Ref->Allocate();
4292     outputHandleRef->Allocate();
4293
4294     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
4295     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4296     CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
4297     CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
4298
4299     workload->PostAllocationConfigure();
4300     workload->Execute();
4301     workloadRef->PostAllocationConfigure();
4302     workloadRef->Execute();
4303     CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
4304     CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
4305
4306     return comparisonResult;
4307 }
4308
4309 LayerTestResult<float,4> CompareBatchNormTest(
4310     armnn::IWorkloadFactory& workloadFactory,
4311     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4312     armnn::IWorkloadFactory& refWorkloadFactory)
4313 {
4314     const unsigned int width     = 2;
4315     const unsigned int height    = 3;
4316     const unsigned int channels  = 5;
4317     const unsigned int batchSize = 3;
4318
4319     armnn::TensorInfo inputTensorInfo;
4320     armnn::TensorInfo outputTensorInfo;
4321     armnn::TensorInfo tensorInfo;
4322
4323     constexpr unsigned int shape[]       = {batchSize, channels, height, width};
4324     constexpr unsigned int tensorShape[] = {channels};
4325
4326     inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4327     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4328     tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
4329
4330     auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
4331
4332     auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
4333     auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
4334     auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
4335     auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);
4336
4337     LayerTestResult<float,4> ret(outputTensorInfo);
4338
4339     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
4340     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4341
4342     std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
4343     std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
4344
4345     armnn::BatchNormalizationQueueDescriptor data;
4346     armnn::WorkloadInfo info;
4347     armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
4348     armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
4349     armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
4350     armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
4351
4352     AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
4353     AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
4354     AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
4355     AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
4356
4357     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
4358     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4359     data.m_Mean             = &meanTensor;
4360     data.m_Variance         = &varianceTensor;
4361     data.m_Beta             = &betaTensor;
4362     data.m_Gamma            = &gammaTensor;
4363     data.m_Parameters.m_Eps = 0.01f;
4364
4365     armnn::BatchNormalizationQueueDescriptor refData = data;
4366     armnn::WorkloadInfo refInfo = info;
4367     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
4368     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
4369
4370     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
4371     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
4372
4373     inputHandle->Allocate();
4374     outputHandle->Allocate();
4375     inputHandleRef->Allocate();
4376     outputHandleRef->Allocate();
4377
4378     CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4379     CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
4380
4381     workload->PostAllocationConfigure();
4382     workload->Execute();
4383     workloadRef->PostAllocationConfigure();
4384     workloadRef->Execute();
4385
4386     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
4387     CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
4388
4389     return ret;
4390 }
4391
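// Runs a Permute workload that rearranges inputData according to the given mappings.
// On return, outputData holds the permuted data and inputTensorInfo has been updated
// to describe the permuted shape.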
4392 template<typename T>
4393 void PermuteTensorData(
4394         armnn::IWorkloadFactory& workloadFactory,
4395         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4396         const armnn::PermutationVector& mappings,
4397         armnn::TensorInfo & inputTensorInfo,
4398         const T * inputData,
4399         std::vector<T>& outputData)
4400 {
4401     BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
4402     if (inputData == nullptr)
4403     {
        // Nullptr is an error in the test. By returning without doing the permutation
        // we expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
4407         return;
4408     }
4409
4410     armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
4411
4412     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4413     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4414
4415     armnn::PermuteQueueDescriptor queueDescriptor;
4416     queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
4417     armnn::WorkloadInfo workloadInfo;
4418     AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
4419     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4420
4421     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
4422
4423     inputHandle->Allocate();
4424     outputHandle->Allocate();
4425
4426     CopyDataToITensorHandle(inputHandle.get(), inputData);
4427
4428     workload->PostAllocationConfigure();
4429     workload->Execute();
4430
4431     outputData.resize(outputTensorInfo.GetNumElements());
4432     CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
4433     inputTensorInfo = outputTensorInfo;
4434 }
4435
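// Convenience overload that builds an OriginsDescriptor for concatenation from the shapes
// of the given input TensorInfos.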
4436 armnn::OriginsDescriptor CreateDescriptorForConcatenation(
4437         const std::vector<armnn::TensorInfo> & inputTensorInfos,
4438         unsigned int concatDim)
4439 {
4440     std::vector<armnn::TensorShape> shapes;
4441     shapes.reserve(inputTensorInfos.size());
4442     for (const armnn::TensorInfo& it: inputTensorInfos)
4443     {
4444         shapes.push_back(it.GetShape());
4445     }
4446
4447     return armnn::CreateDescriptorForConcatenation(shapes.begin(),
4448                                                    shapes.end(),
4449                                                    concatDim);
4450 }
4451
//
// Concatenation is only supported for the N and C dimensions of NCHW tensors, and for the
// innermost dimension. For tensors with fewer than 4 dimensions we need to make sure that
// the concat dimension is at least the 3rd slowest iterating one, or the innermost dimension.
//
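// For example, a 3D tensor can be concatenated along dimension 0 (the slowest iterating one)
// or dimension 2 (the innermost one) directly, but concatenating along dimension 1 first
// requires a permutation (see NeedPermuteForConcat below).
//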
4457
4458 bool NeedPermuteForConcat(
4459         const std::vector<armnn::TensorInfo> & inputTensorInfos,
4460         unsigned int concatDim)
4461 {
4462     // See note above. Additionally we expect the input shapes to have the
4463     // same number of dimensions.
4464     unsigned int nDimensions = 0;
4465
    // Determine the number of dimensions, and sanity-check the inputs
    // against test implementation issues.
4468     for (auto && tensorInfo : inputTensorInfos)
4469     {
4470         if (!nDimensions)
4471         {
4472             nDimensions = tensorInfo.GetShape().GetNumDimensions();
4473         }
4474         else
4475         {
4476             BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
4477                 "Input shapes must have the same number of dimensions");
4478         }
4479     }
4480
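    // A permute is needed when there are fewer than 3 dimensions, or when there are exactly
    // 3 dimensions and the concat dimension is neither the slowest iterating one
    // (concatDim == 0) nor the innermost one (concatDim == 2).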
4481     return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
4482 }
4483
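// Pads a shape with leading 1s until it has 3 dimensions, e.g. { 5 } becomes { 1, 1, 5 }
// and { 2, 3 } becomes { 1, 2, 3 }. Shapes that already have at least 3 dimensions are
// returned unchanged.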
4484 armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
4485 {
4486     unsigned int numDims = inputShape.GetNumDimensions();
4487     if (numDims >= 3)
4488     {
4489         // Nothing to do if the inputShape has at least 3 dimensions.
4490         return inputShape;
4491     }
4492
4493     std::vector<unsigned int> newDims(size_t(3), 1u);
4494     unsigned int expandedBy = 3 - numDims;
4495     for (unsigned int i=0; i<numDims; ++i)
4496     {
4497         newDims[expandedBy+i] = inputShape[i];
4498     }
4499     return armnn::TensorShape(3u, &newDims[0]);
4500 }
4501
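// Computes the forward and reverse permutations that move the concat axis of an (up to 3D)
// tensor to dimension 0, where concatenation is supported. concatDim is updated to the axis
// to concatenate along once the forward permutation has been applied.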
4502 void Generate3dPermuteVectorForConcat(
4503         unsigned int numDimensions,
4504         unsigned int & concatDim,
4505         std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
4506 {
4507     BOOST_ASSERT_MSG(numDimensions <= 3,
4508        "Only dimensions 1,2 and 3 are supported by this helper");
4509     unsigned int expandedBy = 3 - numDimensions;
4510     unsigned int expandedConcatAxis = concatDim + expandedBy;
4511
4512     if (expandedConcatAxis == 2)
4513     {
4514         concatDim = 0;
4515         armnn::PermutationVector forwardPermutation({1, 2, 0});
4516         armnn::PermutationVector reversePermutation({2, 0, 1});
4517         permutations = std::make_pair(forwardPermutation, reversePermutation);
4518     }
4519     else if (expandedConcatAxis == 1)
4520     {
4521         concatDim = 0;
4522         armnn::PermutationVector forwardPermutation({2, 0, 1});
4523         armnn::PermutationVector reversePermutation({1, 2, 0});
4524         permutations = std::make_pair(forwardPermutation, reversePermutation);
4525     }
4526     else
4527     {
4528         BOOST_ASSERT(expandedConcatAxis == 0);
4529         concatDim = 0;
4530     }
4531 }
4532
//
// Permutes the input tensors so that a supported concatenation can be performed.
// Tensors with fewer than 3 dimensions are treated as 3D by adding dummy 1 dimensions
// at the front. Finally, this function reports what the output shape of the permuted,
// concatenated tensor is going to be.
//
4539 template <typename T>
4540 void PermuteInputsForConcat(
4541         armnn::IWorkloadFactory& workloadFactory,
4542         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4543         std::vector<armnn::TensorInfo> & inputTensorInfos,
4544         std::vector<T *> & inputData,
4545         std::vector<std::vector<T>> & inputDataStorage,
4546         armnn::PermutationVector & permuteVector,
4547         unsigned int & concatDim,
4548         armnn::TensorInfo & outputTensorInfo)
4549 {
4550     BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
4551         "Expecting more than one tensor to be concatenated here");
4552
4553     unsigned int numDims = 0;
4554     unsigned int nthInput = 0;
4555     const armnn::PermutationVector identity({0, 1, 2});
4556
4557     std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
4558         std::make_pair(identity, identity);
4559
4560     inputDataStorage.resize(inputData.size());
4561
4562     for (auto && tensorInfo : inputTensorInfos)
4563     {
4564         if (numDims == 0)
4565         {
4566             numDims = tensorInfo.GetShape().GetNumDimensions();
4567             Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
4568
            // Store the reverse permutation.
4570             permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error: no permutation is needed, so we should not have arrived here");
4573         }
4574         else
4575         {
4576             BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
4577                 "All inputs must have the same number of dimensions");
4578         }
4579
4580         armnn::TensorInfo newTensorInfo = tensorInfo;
4581         newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
4582
4583         PermuteTensorData<T>(workloadFactory,
4584                              memoryManager,
4585                              permutations.first,
4586                              newTensorInfo,
4587                              inputData[nthInput],
4588                              inputDataStorage[nthInput]);
4589
4590         inputData[nthInput] = inputDataStorage[nthInput].data();
4591         inputTensorInfos[nthInput] = newTensorInfo;
4592
4593         ++nthInput;
4594     }
4595
4596     outputTensorInfo.SetShape(
4597         armnnUtils::Permuted(
4598             ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
4599             permutations.first));
4600 }

//
// This is the counterpart of PermuteInputsForConcat(...); it permutes the output of the
// concatenation back so that it can be checked against an expected output.
//
4608 template <typename T>
4609 void PermuteOutputForConcat(
4610         armnn::IWorkloadFactory& workloadFactory,
4611         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4612         const armnn::TensorInfo & tensorInfo,
4613         const armnn::PermutationVector & permuteVector,
4614         std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
4615         T * data)
4616 {
4617     BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
4618     if (data == nullptr)
4619     {
        // Nullptr is an error in the test. By returning without doing the permutation
        // we expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
4623         return;
4624     }
4625
4626     armnn::TensorInfo resultTensorInfo = tensorInfo;
4627     std::vector<T> inputData(tensorInfo.GetNumElements());
4628     std::vector<T> outputData;
4629
4630     CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
4631
4632     PermuteTensorData<T>(workloadFactory,
4633                          memoryManager,
4634                          permuteVector,
4635                          resultTensorInfo,
4636                          &inputData[0],
4637                          outputData);
4638
4639     ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
4640 }
4641
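// Concatenates the given inputs into output along concatDim, permuting the data first if the
// requested axis is not directly supported (see NeedPermuteForConcat). When useSubtensor is
// true and the backend supports sub-tensors, the inputs are written through sub-tensor handles
// that alias the output.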
4642 template <typename T>
4643 void Concatenate(
4644     armnn::IWorkloadFactory& workloadFactory,
4645     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4646     std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4647     std::initializer_list<T *> inputsOrig,
4648     const armnn::TensorInfo& outputTensorInfoOrig,
4649     T * output,
4650     unsigned int concatDim,
4651     bool useSubtensor)
4652 {
4653     BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4654     if (output == nullptr)
4655     {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // we expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
4659         return;
4660     }
4661
4662     // Saves a copy of the parameters which we might need to change.
4663     std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4664     std::vector<T *> inputs            = inputsOrig;
4665     armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4666
4667     armnn::PermutationVector permuteVector{0, 1, 2};
4668
4669     // Holds and automatically releases memory for the reshaped input data.
4670     std::vector<std::vector<T>> tmpInputDataStorage;
4671
4672     const size_t inputCount = inputTensorInfos.size();
4673
4674     bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4675
4676     if (needPermuteForConcat)
4677     {
4678         //
4679         // We need to permute the inputs, because concatenation along
4680         // the requested axis is not supported.
4681         //
4682         PermuteInputsForConcat<T>(workloadFactory,
4683                                   memoryManager,
4684                                   inputTensorInfos,
4685                                   inputs,
4686                                   tmpInputDataStorage,
4687                                   permuteVector,
4688                                   concatDim,
4689                                   outputTensorInfo);
4690     }
4691
4692     armnn::WorkloadInfo workloadInfo;
4693
4694     std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4695     inputHandles.reserve(inputCount);
4696
4697     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4698
4699     armnn::ConcatQueueDescriptor queueDescriptor;
4700     armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
4701     queueDescriptor.m_Parameters = viewsDescriptor;
4702
4703     if (useSubtensor)
4704     {
4705         queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4706         for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4707         {
4708             queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4709                 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4710         }
4711
4712         outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4713
4714         const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4715         for (unsigned int i = 0; i < inputCount; ++i)
4716         {
4717             const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4718             std::unique_ptr<armnn::ITensorHandle> inputHandle =
4719                 subTensorsSupported ?
4720                     workloadFactory.CreateSubTensorHandle(*outputHandle,
4721                                                           inputTensorInfo.GetShape(),
4722                                                           queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4723                     workloadFactory.CreateTensorHandle(inputTensorInfo);
4724
4725             inputHandles.emplace_back(std::move(inputHandle));
4726         }
4727
4728     }
4729     else
4730     {
4731         for (unsigned int i = 0; i < inputCount; ++i)
4732         {
4733             std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4734             inputHandles.emplace_back(std::move(inputHandle));
4735         }
4736     }
4737
4738     for (unsigned int i = 0; i < inputCount; ++i)
4739     {
4740         AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
4741     }
4742
4743     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4744
4745     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
4746
4747     for (auto& inputHandle : inputHandles)
4748     {
4749         inputHandle->Allocate();
4750     }
4751
4752     outputHandle->Allocate();
4753
4754     unsigned int nextInputId = 0;
4755     for (auto& inputHandle : inputHandles)
4756     {
4757         CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4758         ++nextInputId;
4759     }
4760
4761     workload->PostAllocationConfigure();
4762     workload->Execute();
4763
4764     if (needPermuteForConcat)
4765     {
4766         PermuteOutputForConcat<T>(workloadFactory,
4767                                   memoryManager,
4768                                   outputTensorInfo,
4769                                   permuteVector,
4770                                   std::move(outputHandle),
4771                                   output);
4772     }
4773     else
4774     {
4775         CopyDataFromITensorHandle(output, outputHandle.get());
4776     }
4777 }
4778
4779 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4780 LayerTestResult<T, 1> Concatenation1dTestImpl(
4781     armnn::IWorkloadFactory& workloadFactory,
4782     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4783     float qScale,
4784     int32_t qOffset)
4785 {
4786     armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
4787
4788     auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4789     auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4790     auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4791
4792     armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
4793
4794     LayerTestResult<T, 1> result(outputTensorInfo);
4795
4796     std::vector<T> output;
4797     output.resize(outputTensorInfo.GetNumElements());
4798     Concatenate<T>(workloadFactory, memoryManager,
4799                    { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4800                    { input0.data(), input1.data(), input2.data() },
4801                    outputTensorInfo,
4802                    output.data(),
4803                    0,
4804                    true);
4805
4806     result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4807     result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4808         1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4809     }));
4810
4811     return result;
4812 }
4813
4814 LayerTestResult<float, 1> Concatenation1dTest(
4815     armnn::IWorkloadFactory& workloadFactory,
4816     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4817 {
4818     return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
4819 }
4820
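// Shared implementation for the 2D concatenation tests: concatenates three fixed 2x3 inputs
// along the given dimension into outputTensorInfo. Callers fill in the expected output.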
4821 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4822 LayerTestResult<T, 2> Concatenation2dTestImpl(
4823     armnn::IWorkloadFactory& workloadFactory,
4824     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4825     const armnn::TensorInfo& outputTensorInfo,
4826     unsigned int dimension,
4827     const float qScale,
4828     const int32_t qOffset)
4829 {
4830     armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
4831
4832     auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4833         // Batch 0
4834         1.0f, 2.0f, 3.0f,
4835
4836         // Batch 1
4837         10.0f, 11.0f, 12.0f,
4838     }));
4839
4840     auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4841         // Batch 0
4842         4.0f, 5.0f, 6.0f,
4843
4844         // Batch 1
4845         13.0f, 14.0f, 15.0f,
4846     }));
4847
4848     auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4849         // Batch 0
4850         7.0f, 8.0f, 9.0f,
4851
4852         // Batch 1
4853         16.0f, 17.0f, 18.0f,
4854     }));
4855
4856     LayerTestResult<T, 2> result(outputTensorInfo);
4857
4858     std::vector<T> output;
4859     output.resize(outputTensorInfo.GetNumElements());
4860     Concatenate<T>(workloadFactory, memoryManager,
4861                    { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4862                    { input0.data(), input1.data(), input2.data() },
4863                    outputTensorInfo,
4864                    output.data(),
4865                    dimension,
4866                    true);
4867
4868     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4869     return result;
4870 }
4871
4872 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4873 LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
4874     armnn::IWorkloadFactory& workloadFactory,
4875     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4876     float qScale,
4877     int32_t qOffset)
4878 {
4879     armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
4880
4881     LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4882         workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
4883
4884     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4885         // Batch 0
4886         1.0f, 2.0f, 3.0f,
4887
4888         // Batch 1
4889         10.0f, 11.0f, 12.0f,
4890
4891         // Batch 2
4892         4.0f, 5.0f, 6.0f,
4893
4894         // Batch 3
4895         13.0f, 14.0f, 15.0f,
4896
4897         // Batch 4
4898         7.0f, 8.0f, 9.0f,
4899
4900         // Batch 5
4901         16.0f, 17.0f, 18.0f,
4902     }));
4903
4904     return result;
4905 }
4906
4907 LayerTestResult<float, 2> Concatenation2dDim0Test(
4908     armnn::IWorkloadFactory& workloadFactory,
4909     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4910 {
4911     return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
4912 }
4913
4914 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4915 LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
4916     armnn::IWorkloadFactory& workloadFactory,
4917     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4918     float qScale,
4919     int32_t qOffset)
4920 {
4921     armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
4922
4923     LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4924         workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
4925
4926     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4927         // Batch 0
4928         1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4929
4930         // Batch 1
4931         10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
4932     }));
4933
4934     return result;
4935 }
4936
4937 LayerTestResult<float, 2> Concatenation2dDim1Test(
4938     armnn::IWorkloadFactory& workloadFactory,
4939     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4940 {
4941     return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
4942 }
4943
4944 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4945 LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
4946     armnn::IWorkloadFactory& workloadFactory,
4947     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4948     float qScale,
4949     int32_t qOffset)
4950 {
4951     armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
4952     auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4953         // Batch 0
4954         1.0f, 2.0f, 3.0f,
4955
4956         // Batch 1
4957         10.0f, 11.0f, 12.0f,
4958     }));
4959
4960     armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
4961     auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4962         // Batch 0
4963         4.0f, 5.0f, 6.0f,
4964
4965         // Batch 1
4966         13.0f, 14.0f, 15.0f,
4967
        // Batch 2
4969         7.0f, 8.0f, 9.0f,
4970     }));
4971
4972     armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
4973     auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
4975         16.0f, 17.0f, 18.0f,
4976     }));
4977
4978     armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
4979     LayerTestResult<T, 2> result(outputTensorInfo);
4980
4981     std::vector<T> output;
4982     output.resize(outputTensorInfo.GetNumElements());
4983     Concatenate<T>(workloadFactory, memoryManager,
4984                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4985                    { input0.data(), input1.data(), input2.data() },
4986                    outputTensorInfo,
4987                    output.data(),
4988                    0,
4989                    true);
4990
4991     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4992     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4993         // Batch 0
4994         1.0f, 2.0f, 3.0f,
4995
4996         // Batch 1
4997         10.0f, 11.0f, 12.0f,
4998
4999         // Batch 2
5000         4.0f, 5.0f, 6.0f,
5001
5002         // Batch 3
5003         13.0f, 14.0f, 15.0f,
5004
5005         // Batch 4
5006         7.0f, 8.0f, 9.0f,
5007
5008         // Batch 5
5009         16.0f, 17.0f, 18.0f,
5010     }));
5011
5012     return result;
5013 }
5014
5015 LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
5016     armnn::IWorkloadFactory& workloadFactory,
5017     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5018 {
5019     return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5020         workloadFactory, memoryManager, 0.0f, 0);
5021 }
5022
5023 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5024 LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
5025     armnn::IWorkloadFactory& workloadFactory,
5026     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5027     float qScale,
5028     int32_t qOffset)
5029 {
5030     armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
5031     auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5032         // Batch 0
5033         1.0f, 2.0f, 3.0f,
5034
5035         // Batch 1
5036         10.0f, 11.0f, 12.0f,
5037     }));
5038
5039     armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
5040     auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5041         // Batch 0
5042         4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
5043
5044         // Batch 1
5045         13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
5046     }));
5047
5048     armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
5049     auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5050         // Batch 0
5051         9.0f,
5052
5053         // Batch 1
5054         18.0f
5055     }));
5056
5057     armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
5058     LayerTestResult<T, 2> result(outputTensorInfo);
5059
5060     std::vector<T> output;
5061     output.resize(outputTensorInfo.GetNumElements());
5062     Concatenate<T>(workloadFactory, memoryManager,
5063                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5064                    { input0.data(), input1.data(), input2.data() },
5065                    outputTensorInfo,
5066                    output.data(),
5067                    1,
5068                    true);
5069
5070     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
5071     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5072         // Batch 0
5073         1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
5074
5075         // Batch 1
5076         10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
5077     }));
5078
5079     return result;
5080 }
5081
5082 LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
5083     armnn::IWorkloadFactory& workloadFactory,
5084     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5085 {
5086     return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5087         workloadFactory, memoryManager, 0.0f, 0);
5088 }
5089
5090 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5091 LayerTestResult<T, 3> Concatenation3dTestImpl(
5092     armnn::IWorkloadFactory& workloadFactory,
5093     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5094     const armnn::TensorInfo& outputTensorInfo,
5095     unsigned int dimension,
5096     bool useSubtensor,
5097     float qScale,
5098     int32_t qOffset)
5099 {
5100     armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5101
5102     auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5103         // Batch 0, Channel 0
5104         1.0f, 2.0f,
5105
5106         // Batch 0, Channel 1
5107         3.0f, 4.0f,
5108
5109         // Batch 0, Channel 2
5110         5.0f, 6.0f,
5111
5112         // Batch 1, Channel 0
5113         19.0f, 20.0f,
5114
5115         // Batch 1, Channel 1
5116         21.0f, 22.0f,
5117
5118         // Batch 1, Channel 2
5119         23.0f, 24.0f
5120     }));
5121
5122     auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5123         // Batch 0, Channel 0
5124         7.0f, 8.0f,
5125
5126         // Batch 0, Channel 1
5127         9.0f, 10.0f,
5128
5129         // Batch 0, Channel 2
5130         11.0f, 12.0f,
5131
5132         // Batch 1, Channel 0
5133         25.0f, 26.0f,
5134
5135         // Batch 1, Channel 1
5136         27.0f, 28.0f,
5137
5138         // Batch 1, Channel 2
5139         29.0f, 30.0f
5140     }));
5141
5142     auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5143         // Batch 0, Channel 0
5144         13.0f, 14.0f,
5145
5146         // Batch 0, Channel 1
5147         15.0f, 16.0f,
5148
5149         // Batch 0, Channel 2
5150         17.0f, 18.0f,
5151
5152         // Batch 1, Channel 0
5153         31.0f, 32.0f,
5154
5155         // Batch 1, Channel 1
5156         33.0f, 34.0f,
5157
5158         // Batch 1, Channel 2
5159         35.0f, 36.0f
5160     }));
5161
5162     LayerTestResult<T, 3> result(outputTensorInfo);
5163
5164     std::vector<T> output;
5165     output.resize(outputTensorInfo.GetNumElements());
5166     Concatenate<T>(workloadFactory, memoryManager,
5167                    { inputTensorInfo, inputTensorInfo, inputTensorInfo },
5168                    { input0.data(), input1.data(), input2.data() },
5169                    outputTensorInfo,
5170                    output.data(),
5171                    dimension,
5172                    useSubtensor);
5173
5174     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5175     return result;
5176 }
5177
5178 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5179 LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
5180     armnn::IWorkloadFactory& workloadFactory,
5181     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5182     float qScale,
5183     int32_t qOffset)
5184 {
5185     armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
5186
5187     LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5188         workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
5189
5190     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5191         // Batch 0, Channel 0
5192         1.0f, 2.0f,
5193
5194         // Batch 0, Channel 1
5195         3.0f, 4.0f,
5196
5197         // Batch 0, Channel 2
5198         5.0f, 6.0f,
5199
5200         // Batch 1, Channel 0
5201         19.0f, 20.0f,
5202
5203         // Batch 1, Channel 1
5204         21.0f, 22.0f,
5205
5206         // Batch 1, Channel 2
5207         23.0f, 24.0f,
5208
5209         // Batch 2, Channel 0
5210         7.0f, 8.0f,
5211
5212         // Batch 2, Channel 1
5213         9.0f, 10.0f,
5214
5215         // Batch 2, Channel 2
5216         11.0f, 12.0f,
5217
5218         // Batch 3, Channel 0
5219         25.0f, 26.0f,
5220
5221         // Batch 3, Channel 1
5222         27.0f, 28.0f,
5223
5224         // Batch 3, Channel 2
5225         29.0f, 30.0f,
5226
5227         // Batch 4, Channel 0
5228         13.0f, 14.0f,
5229
5230         // Batch 4, Channel 1
5231         15.0f, 16.0f,
5232
5233         // Batch 4, Channel 2
5234         17.0f, 18.0f,
5235
5236         // Batch 5, Channel 0
5237         31.0f, 32.0f,
5238
5239         // Batch 5, Channel 1
5240         33.0f, 34.0f,
5241
5242         // Batch 5, Channel 2
5243         35.0f, 36.0f
5244     }));
5245
5246     return result;
5247 }
5248
5249 LayerTestResult<float, 3> Concatenation3dDim0Test(
5250     armnn::IWorkloadFactory& workloadFactory,
5251     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5252 {
5253     return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5254 }
5255
5256 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5257 LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
5258     armnn::IWorkloadFactory& workloadFactory,
5259     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5260     float qScale,
5261     int32_t qOffset)
5262 {
5263     armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
5264
5265     LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5266         workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
5267
5268     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5269         // Batch 0, Channel 0
5270         1.0f, 2.0f,
5271
5272         // Batch 0, Channel 1
5273         3.0f, 4.0f,
5274
5275         // Batch 0, Channel 2
5276         5.0f, 6.0f,
5277
5278         // Batch 0, Channel 3
5279         7.0f, 8.0f,
5280
5281         // Batch 0, Channel 4
5282         9.0f, 10.0f,
5283
5284         // Batch 0, Channel 5
5285         11.0f, 12.0f,
5286
5287         // Batch 0, Channel 6
5288         13.0f, 14.0f,
5289
5290         // Batch 0, Channel 7
5291         15.0f, 16.0f,
5292
5293         // Batch 0, Channel 8
5294         17.0f, 18.0f,
5295
5296         // Batch 1, Channel 0
5297         19.0f, 20.0f,
5298
5299         // Batch 1, Channel 1
5300         21.0f, 22.0f,
5301
5302         // Batch 1, Channel 2
5303         23.0f, 24.0f,
5304
5305         // Batch 1, Channel 3
5306         25.0f, 26.0f,
5307
5308         // Batch 1, Channel 4
5309         27.0f, 28.0f,
5310
5311         // Batch 1, Channel 5
5312         29.0f, 30.0f,
5313
5314         // Batch 1, Channel 6
5315         31.0f, 32.0f,
5316
5317         // Batch 1, Channel 7
5318         33.0f, 34.0f,
5319
5320         // Batch 1, Channel 8
5321         35.0f, 36.0f
5322     }));
5323
5324     return result;
5325 }
5326
5327 LayerTestResult<float, 3> Concatenation3dDim1Test(
5328     armnn::IWorkloadFactory& workloadFactory,
5329     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5330 {
5331     return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5332 }
5333
5334 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5335 LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
5336     armnn::IWorkloadFactory& workloadFactory,
5337     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5338     bool useSubtensor,
5339     float qScale,
5340     int32_t qOffset)
5341 {
5342     armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
5343
5344     LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5345         workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
5346
5347     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5348         // Batch 0, Channel 0
5349         1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
5350
5351         // Batch 0, Channel 1
5352         3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
5353
5354         // Batch 0, Channel 2
5355         5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
5356
5357         // Batch 1, Channel 0
5358         19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
5359
5360         // Batch 1, Channel 1
5361         21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
5362
5363         // Batch 1, Channel 2
5364         23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
5365     }));
5366
5367     return result;
5368 }
5369
5370 LayerTestResult<float, 3> Concatenation3dDim2Test(
5371     armnn::IWorkloadFactory& workloadFactory,
5372     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5373     bool useSubtensor)
5374 {
5375     return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
5376         workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
5377 }
5378
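// Concatenates inputs of shapes { 2, 3, 2 }, { 1, 3, 2 } and { 3, 3, 2 } along
// dimension 0 (batches), giving a { 6, 3, 2 } output.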
5379 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5380 LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
5381     armnn::IWorkloadFactory& workloadFactory,
5382     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5383     float qScale,
5384     int32_t qOffset)
5385 {
5386     armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5387     auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5388             // Batch 0, Channel 0
5389             1.0f, 2.0f,
5390
5391             // Batch 0, Channel 1
5392             3.0f, 4.0f,
5393
5394             // Batch 0, Channel 2
5395             5.0f, 6.0f,
5396
5397             // Batch 1, Channel 0
5398             19.0f, 20.0f,
5399
5400             // Batch 1, Channel 1
5401             21.0f, 22.0f,
5402
5403             // Batch 1, Channel 2
5404             23.0f, 24.0f
5405     }));
5406
5407     armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType, qScale, qOffset);
5408     auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5409             // Batch 0, Channel 0
5410             7.0f, 8.0f,
5411
5412             // Batch 0, Channel 1
5413             9.0f, 10.0f,
5414
5415             // Batch 0, Channel 2
5416             11.0f, 12.0f,
5417     }));
5418
5419     armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType, qScale, qOffset);
5420     auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5421             // Batch 0, Channel 0
5422             25.0f, 26.0f,
5423
5424             // Batch 0, Channel 1
5425             27.0f, 28.0f,
5426
5427             // Batch 0, Channel 2
5428             29.0f, 30.0f,
5429
5430             // Batch 1, Channel 0
5431             13.0f, 14.0f,
5432
5433             // Batch 1, Channel 1
5434             15.0f, 16.0f,
5435
5436             // Batch 1, Channel 2
5437             17.0f, 18.0f,
5438
5439             // Batch 2, Channel 0
5440             31.0f, 32.0f,
5441
5442             // Batch 2, Channel 1
5443             33.0f, 34.0f,
5444
5445             // Batch 2, Channel 2
5446             35.0f, 36.0f
5447     }));
5448
5449     armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
5450     LayerTestResult<T, 3> result(outputTensorInfo);
5451
5452     std::vector<T> output;
5453     output.resize(outputTensorInfo.GetNumElements());
5454     Concatenate<T>(workloadFactory, memoryManager,
5455                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5456                    { input0.data(), input1.data(), input2.data() },
5457                    outputTensorInfo,
5458                    output.data(),
5459                    0,
5460                    true);
5461
5462     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5463     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5464         // Batch 0, Channel 0
5465         1.0f, 2.0f,
5466
5467         // Batch 0, Channel 1
5468         3.0f, 4.0f,
5469
5470         // Batch 0, Channel 2
5471         5.0f, 6.0f,
5472
5473         // Batch 1, Channel 0
5474         19.0f, 20.0f,
5475
5476         // Batch 1, Channel 1
5477         21.0f, 22.0f,
5478
5479         // Batch 1, Channel 2
5480         23.0f, 24.0f,
5481
5482         // Batch 2, Channel 0
5483         7.0f, 8.0f,
5484
5485         // Batch 2, Channel 1
5486         9.0f, 10.0f,
5487
5488         // Batch 2, Channel 2
5489         11.0f, 12.0f,
5490
5491         // Batch 3, Channel 0
5492         25.0f, 26.0f,
5493
5494         // Batch 3, Channel 1
5495         27.0f, 28.0f,
5496
5497         // Batch 3, Channel 2
5498         29.0f, 30.0f,
5499
5500         // Batch 4, Channel 0
5501         13.0f, 14.0f,
5502
5503         // Batch 4, Channel 1
5504         15.0f, 16.0f,
5505
5506         // Batch 4, Channel 2
5507         17.0f, 18.0f,
5508
5509         // Batch 5, Channel 0
5510         31.0f, 32.0f,
5511
5512         // Batch 5, Channel 1
5513         33.0f, 34.0f,
5514
5515         // Batch 5, Channel 2
5516         35.0f, 36.0f
5517     }));
5518
5519     return result;
5520 }
5521
5522 LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5523     armnn::IWorkloadFactory& workloadFactory,
5524     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5525 {
5526     return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5527         workloadFactory, memoryManager, 0.0f, 0);
5528 }
5529
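// Concatenates inputs with 3, 4 and 1 channels along dimension 1, giving a
// { 2, 8, 2 } output.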
5530 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5531 LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
5532     armnn::IWorkloadFactory& workloadFactory,
5533     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5534     float qScale,
5535     int32_t qOffset)
5536 {
5537     armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5538     auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5539         // Batch 0, Channel 0
5540         1.0f, 2.0f,
5541
5542         // Batch 0, Channel 1
5543         3.0f, 4.0f,
5544
5545         // Batch 0, Channel 2
5546         5.0f, 6.0f,
5547
5548         // Batch 1, Channel 0
5549         19.0f, 20.0f,
5550
5551         // Batch 1, Channel 1
5552         21.0f, 22.0f,
5553
5554         // Batch 1, Channel 2
5555         23.0f, 24.0f
5556     }));
5557
5558     armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
5559     auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5560         // Batch 0, Channel 0
5561         7.0f, 8.0f,
5562
5563         // Batch 0, Channel 1
5564         9.0f, 10.0f,
5565
5566         // Batch 0, Channel 2
5567         11.0f, 12.0f,
5568
5569         // Batch 0, Channel 3
5570         25.0f, 26.0f,
5571
5572         // Batch 1, Channel 0
5573         27.0f, 28.0f,
5574
5575         // Batch 1, Channel 1
5576         29.0f, 30.0f,
5577
5578         // Batch 1, Channel 2
5579         13.0f, 14.0f,
5580
5581         // Batch 1, Channel 3
5582         15.0f, 16.0f,
5583     }));
5584
5585     armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
5586     auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5587         // Batch 0, Channel 0
5588         17.0f, 18.0f,
5589
5590         // Batch 1, Channel 0
5591         31.0f, 32.0f,
5592     }));
5593
5594     armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
5595     LayerTestResult<T, 3> result(outputTensorInfo);
5596
5597     std::vector<T> output;
5598     output.resize(outputTensorInfo.GetNumElements());
5599     Concatenate<T>(workloadFactory, memoryManager,
5600                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5601                    { input0.data(), input1.data(), input2.data() },
5602                    outputTensorInfo,
5603                    output.data(),
5604                    1,
5605                    true);
5606
5607     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5608     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5609         // Batch 0, Channel 0
5610         1.0f, 2.0f,
5611
5612         // Batch 0, Channel 1
5613         3.0f, 4.0f,
5614
5615         // Batch 0, Channel 2
5616         5.0f, 6.0f,
5617
5618         // Batch 0, Channel 3
5619         7.0f, 8.0f,
5620
5621         // Batch 0, Channel 4
5622         9.0f, 10.0f,
5623
5624         // Batch 0, Channel 5
5625         11.0f, 12.0f,
5626
5627         // Batch 0, Channel 6
5628         25.0f, 26.0f,
5629
5630         // Batch 0, Channel 7
5631         17.0f, 18.0f,
5632
5633         // Batch 1, Channel 0
5634         19.0f, 20.0f,
5635
5636         // Batch 1, Channel 1
5637         21.0f, 22.0f,
5638
5639         // Batch 1, Channel 2
5640         23.0f, 24.0f,
5641
5642         // Batch 1, Channel 3
5643         27.0f, 28.0f,
5644
5645         // Batch 1, Channel 4
5646         29.0f, 30.0f,
5647
5648         // Batch 1, Channel 5
5649         13.0f, 14.0f,
5650
5651         // Batch 1, Channel 6
5652         15.0f, 16.0f,
5653
5654         // Batch 1, Channel 7
5655         31.0f, 32.0f,
5656     }));
5657
5658     return result;
5659 }
5660
5661 LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5662     armnn::IWorkloadFactory& workloadFactory,
5663     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5664 {
5665     return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5666         workloadFactory, memoryManager, 0.0f, 0);
5667 }
5668
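// Concatenates inputs of widths 2, 1 and 3 along dimension 2, giving a
// { 2, 3, 6 } output.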
5669 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5670 LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
5671     armnn::IWorkloadFactory& workloadFactory,
5672     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5673     bool useSubtensor,
5674     float qScale,
5675     int32_t qOffset)
5676 {
5677     armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5678     auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5679         // Batch 0, Channel 0
5680         1.0f, 2.0f,
5681
5682         // Batch 0, Channel 1
5683         3.0f, 4.0f,
5684
5685         // Batch 0, Channel 2
5686         5.0f, 6.0f,
5687
5688         // Batch 1, Channel 0
5689         19.0f, 20.0f,
5690
5691         // Batch 1, Channel 1
5692         21.0f, 22.0f,
5693
5694         // Batch 1, Channel 2
5695         23.0f, 24.0f
5696     }));
5697
5698     armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
5699     auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5700         // Batch 0, Channel 0
5701         7.0f,
5702
5703         // Batch 0, Channel 1
5704         9.0f,
5705
5706         // Batch 0, Channel 2
5707         11.0f,
5708
5709         // Batch 1, Channel 0
5710         25.0f,
5711
5712         // Batch 1, Channel 1
5713         27.0f,
5714
5715         // Batch 1, Channel 2
5716         29.0f
5717     }));
5718
5719     armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
5720     auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5721         // Batch 0, Channel 0
5722         13.0f, 14.0f, 50.0f,
5723
5724         // Batch 0, Channel 1
5725         15.0f, 16.0f, 51.0f,
5726
5727         // Batch 0, Channel 2
5728         17.0f, 18.0f, 52.0f,
5729
5730         // Batch 1, Channel 0
5731         31.0f, 32.0f, 53.0f,
5732
5733         // Batch 1, Channel 1
5734         33.0f, 34.0f, 54.0f,
5735
5736         // Batch 1, Channel 2
5737         35.0f, 36.0f, 55.0f,
5738     }));
5739
5740     armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
5741     LayerTestResult<T, 3> result(outputTensorInfo);
5742
5743     std::vector<T> output;
5744     output.resize(outputTensorInfo.GetNumElements());
5745     Concatenate<T>(workloadFactory, memoryManager,
5746                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5747                    { input0.data(), input1.data(), input2.data() },
5748                    outputTensorInfo,
5749                    output.data(),
5750                    2,
5751                    useSubtensor);
5752
5753     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5754     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5755         // Batch 0, Channel 0
5756         1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
5757
5758         // Batch 0, Channel 1
5759         3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
5760
5761         // Batch 0, Channel 2
5762         5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
5763
5764         // Batch 1, Channel 0
5765         19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
5766
5767         // Batch 1, Channel 1
5768         21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
5769
5770         // Batch 1, Channel 2
5771         23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
5772     }));
5773
5774     return result;
5775 }
5776
5777 LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5778     armnn::IWorkloadFactory& workloadFactory,
5779     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5780     bool useSubtensor)
5781 {
5782     return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5783         workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
5784 }
5785
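// Shared helper for the 4d concatenation tests: concatenates three identical
// { 1, 3, 2, 2 } inputs along the requested dimension. Callers provide the
// output shape and fill in the expected values themselves.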
5786 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5787 LayerTestResult<T, 4> Concatenation4dTestImpl(
5788     armnn::IWorkloadFactory& workloadFactory,
5789     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5790     const armnn::TensorInfo& outputTensorInfo,
5791     unsigned int dimension,
5792     bool useSubtensor,
5793     float qScale,
5794     int32_t qOffset)
5795 {
5796     armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
5797
5798     auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5799         1.0f, 2.0f,
5800         3.0f, 4.0f,
5801         5.0f, 6.0f,
5802         7.0f, 8.0f,
5803         9.0f, 10.0f,
5804         11.0f, 12.0f
5805     }));
5806
5807     auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5808         11.0f, 12.0f,
5809         13.0f, 14.0f,
5810         15.0f, 16.0f,
5811         17.0f, 18.0f,
5812         19.0f, 20.0f,
5813         21.0f, 22.0f
5814     }));
5815
5816     auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5817         21.0f, 22.0f,
5818         23.0f, 24.0f,
5819         25.0f, 26.0f,
5820         27.0f, 28.0f,
5821         29.0f, 30.0f,
5822         31.0f, 32.0f
5823     }));
5824
5825     LayerTestResult<T, 4> result(outputTensorInfo);
5826
5827     std::vector<T> output;
5828     output.resize(outputTensorInfo.GetNumElements());
5829
5830     Concatenate<T>(workloadFactory,
5831                    memoryManager,
5832                    {inputTensorInfo, inputTensorInfo, inputTensorInfo},
5833                    {input0.data(), input1.data(), input2.data()},
5834                    outputTensorInfo,
5835                    output.data(),
5836                    dimension,
5837                    useSubtensor);
5838
5839     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5840     return result;
5841 }
5842
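// The Concatenation4dDim*TestImpl functions below all reuse Concatenation4dTestImpl,
// varying only the concatenation dimension and the expected element ordering.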
5843 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5844 LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
5845     armnn::IWorkloadFactory& workloadFactory,
5846     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5847     float qScale,
5848     int32_t qOffset)
5849 {
5850     armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
5851
5852     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5853         workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
5854
5855     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5856         1.0f, 2.0f,
5857         3.0f, 4.0f,
5858         5.0f, 6.0f,
5859         7.0f, 8.0f,
5860         9.0f, 10.0f,
5861         11.0f, 12.0f,
5862
5863         11.0f, 12.0f,
5864         13.0f, 14.0f,
5865         15.0f, 16.0f,
5866         17.0f, 18.0f,
5867         19.0f, 20.0f,
5868         21.0f, 22.0f,
5869
5870         21.0f, 22.0f,
5871         23.0f, 24.0f,
5872         25.0f, 26.0f,
5873         27.0f, 28.0f,
5874         29.0f, 30.0f,
5875         31.0f, 32.0f
5876     }));
5877     return result;
5878 }
5879
5880 LayerTestResult<float, 4> Concatenation4dDim0Test(
5881     armnn::IWorkloadFactory& workloadFactory,
5882     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5883 {
5884     return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5885 }
5886
5887 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5888 LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
5889     armnn::IWorkloadFactory& workloadFactory,
5890     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5891     float qScale,
5892     int32_t qOffset)
5893 {
5894     armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
5895
5896     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5897         workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
5898
5899     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5900         1.0f, 2.0f,
5901         3.0f, 4.0f,
5902         5.0f, 6.0f,
5903         7.0f, 8.0f,
5904         9.0f, 10.0f,
5905         11.0f, 12.0f,
5906
5907         11.0f, 12.0f,
5908         13.0f, 14.0f,
5909         15.0f, 16.0f,
5910         17.0f, 18.0f,
5911         19.0f, 20.0f,
5912         21.0f, 22.0f,
5913
5914         21.0f, 22.0f,
5915         23.0f, 24.0f,
5916         25.0f, 26.0f,
5917         27.0f, 28.0f,
5918         29.0f, 30.0f,
5919         31.0f, 32.0f
5920     }));
5921
5922     return result;
5923 }
5924
5925 LayerTestResult<float, 4> Concatenation4dDim1Test(
5926     armnn::IWorkloadFactory& workloadFactory,
5927     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5928 {
5929     return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5930 }
5931
5932 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5933 LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
5934     armnn::IWorkloadFactory& workloadFactory,
5935     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5936     float qScale,
5937     int32_t qOffset)
5938 {
5939     armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
5940
5941     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5942         workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
5943
5944     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5945         1.0f, 2.0f,
5946         3.0f, 4.0f,
5947         11.0f, 12.0f,
5948         13.0f, 14.0f,
5949         21.0f, 22.0f,
5950         23.0f, 24.0f,
5951
5952         5.0f, 6.0f,
5953         7.0f, 8.0f,
5954         15.0f, 16.0f,
5955         17.0f, 18.0f,
5956         25.0f, 26.0f,
5957         27.0f, 28.0f,
5958
5959         9.0f, 10.0f,
5960         11.0f, 12.0f,
5961         19.0f, 20.0f,
5962         21.0f, 22.0f,
5963         29.0f, 30.0f,
5964         31.0f, 32.0f
5965     }));
5966
5967     return result;
5968 }
5969
5970 LayerTestResult<float, 4> Concatenation4dDim2Test(
5971     armnn::IWorkloadFactory& workloadFactory,
5972     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5973 {
5974     return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5975 }
5976
5977 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5978 LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
5979     armnn::IWorkloadFactory& workloadFactory,
5980     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5981     float qScale,
5982     int32_t qOffset,
5983     bool useSubtensor)
5984 {
5985     armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
5986
5987     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5988         workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
5989
5990     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5991         1.0f, 2.0f,
5992         11.0f, 12.0f,
5993         21.0f, 22.0f,
5994         3.0f, 4.0f,
5995         13.0f, 14.0f,
5996         23.0f, 24.0f,
5997
5998         5.0f, 6.0f,
5999         15.0f, 16.0f,
6000         25.0f, 26.0f,
6001         7.0f, 8.0f,
6002         17.0f, 18.0f,
6003         27.0f, 28.0f,
6004
6005         9.0f, 10.0f,
6006         19.0f, 20.0f,
6007         29.0f, 30.0f,
6008         11.0f, 12.0f,
6009         21.0f, 22.0f,
6010         31.0f, 32.0f
6011     }));
6012
6013     return result;
6014 }
6015
6016 LayerTestResult<float, 4> Concatenation4dDim3Test(
6017     armnn::IWorkloadFactory& workloadFactory,
6018     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6019     bool useSubtensor)
6020 {
6021     return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
6022         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
6023 }
6024
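// The Concatenation4dDiffShapeDim*TestImpl functions join two inputs whose shapes
// differ only in the concatenation dimension.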
6025 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6026 LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
6027     armnn::IWorkloadFactory& workloadFactory,
6028     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6029     float qScale,
6030     int32_t qOffset)
6031 {
6032     unsigned int dimension = 0;
6033     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6034
6035     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6036         1.0f, 2.0f,
6037         3.0f, 4.0f,
6038         5.0f, 6.0f,
6039         7.0f, 8.0f,
6040         9.0f, 10.0f,
6041         11.0f, 12.0f
6042     }));
6043
6044     armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6045
6046     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6047         11.0f, 12.0f,
6048         13.0f, 14.0f,
6049         15.0f, 16.0f,
6050         17.0f, 18.0f,
6051         19.0f, 20.0f,
6052         21.0f, 22.0f,
6053
6054         21.0f, 22.0f,
6055         23.0f, 24.0f,
6056         25.0f, 26.0f,
6057         27.0f, 28.0f,
6058         29.0f, 30.0f,
6059         31.0f, 32.0f
6061     }));
6062
6063     armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6064
6065     LayerTestResult<T, 4> result(outputTensorInfo);
6066
6067     std::vector<T> output;
6068     output.resize(outputTensorInfo.GetNumElements());
6069     Concatenate<T>(workloadFactory,
6070                    memoryManager,
6071                    {inputTensorInfo0, inputTensorInfo1},
6072                    {input0.data(), input1.data()},
6073                    outputTensorInfo,
6074                    output.data(),
6075                    dimension,
6076                    true);
6077
6078     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6079     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6080         1.0f, 2.0f,
6081         3.0f, 4.0f,
6082         5.0f, 6.0f,
6083         7.0f, 8.0f,
6084         9.0f, 10.0f,
6085         11.0f, 12.0f,
6086
6087         11.0f, 12.0f,
6088         13.0f, 14.0f,
6089         15.0f, 16.0f,
6090         17.0f, 18.0f,
6091         19.0f, 20.0f,
6092         21.0f, 22.0f,
6093
6094         21.0f, 22.0f,
6095         23.0f, 24.0f,
6096         25.0f, 26.0f,
6097         27.0f, 28.0f,
6098         29.0f, 30.0f,
6099         31.0f, 32.0f
6100     }));
6101
6102     return result;
6103 }
6104
6105 LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
6106     armnn::IWorkloadFactory& workloadFactory,
6107     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6108 {
6109     return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
6110         workloadFactory, memoryManager, 0.0f, 0);
6111 }
6112
6113 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6114 LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
6115     armnn::IWorkloadFactory& workloadFactory,
6116     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6117     float qScale,
6118     int32_t qOffset)
6119 {
6120     unsigned int dimension = 1;
6121     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6122
6123     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6124         1.0f, 2.0f,
6125         3.0f, 4.0f,
6126         5.0f, 6.0f,
6127         7.0f, 8.0f,
6128         9.0f, 10.0f,
6129         11.0f, 12.0f
6130     }));
6131
6132     armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
6133
6134     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6135         11.0f, 12.0f,
6136         13.0f, 14.0f,
6137         15.0f, 16.0f,
6138         17.0f, 18.0f
6140     }));
6141
6142     armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
6143
6144     LayerTestResult<T, 4> result(outputTensorInfo);
6145
6146     std::vector<T> output;
6147     output.resize(outputTensorInfo.GetNumElements());
6148     Concatenate<T>(workloadFactory,
6149                    memoryManager,
6150                    {inputTensorInfo0, inputTensorInfo1},
6151                    {input0.data(), input1.data()},
6152                    outputTensorInfo,
6153                    output.data(),
6154                    dimension,
6155                    true);
6156
6157     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6158     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6159         1.0f, 2.0f,
6160         3.0f, 4.0f,
6161         5.0f, 6.0f,
6162         7.0f, 8.0f,
6163         9.0f, 10.0f,
6164         11.0f, 12.0f,
6165         11.0f, 12.0f,
6166         13.0f, 14.0f,
6167         15.0f, 16.0f,
6168         17.0f, 18.0f
6169     }));
6170
6171     return result;
6172 }
6173
6174 LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
6175     armnn::IWorkloadFactory& workloadFactory,
6176     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6177 {
6178     return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
6179         workloadFactory, memoryManager, 0.0f, 0);
6180 }
6181
6182 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6183 LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
6184     armnn::IWorkloadFactory& workloadFactory,
6185     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6186     float qScale,
6187     int32_t qOffset)
6188 {
6189     unsigned int dimension = 2;
6190     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6191
6192     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6193         1.0f, 2.0f,
6194         3.0f, 4.0f,
6195         5.0f, 6.0f,
6196         7.0f, 8.0f,
6197         9.0f, 10.0f,
6198         11.0f, 12.0f
6199     }));
6200
6201     armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
6202
6203     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6204         11.0f, 12.0f,
6205         13.0f, 14.0f,
6206         15.0f, 16.0f,
6207         17.0f, 18.0f,
6208         19.0f, 20.0f,
6209         21.0f, 22.0f,
6210         23.0f, 24.0f,
6211         25.0f, 26.0f,
6212         27.0f, 28.0f
6213     }));
6214
6215     armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
6216
6217     LayerTestResult<T, 4> result(outputTensorInfo);
6218
6219     std::vector<T> output;
6220     output.resize(outputTensorInfo.GetNumElements());
6221     Concatenate<T>(workloadFactory,
6222                    memoryManager,
6223                    {inputTensorInfo0, inputTensorInfo1},
6224                    {input0.data(), input1.data()},
6225                    outputTensorInfo,
6226                    output.data(),
6227                    dimension,
6228                    true);
6229
6230     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6231     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6232         1.0f, 2.0f,
6233         3.0f, 4.0f,
6234         11.0f, 12.0f,
6235         13.0f, 14.0f,
6236         15.0f, 16.0f,
6237
6238         5.0f, 6.0f,
6239         7.0f, 8.0f,
6240         17.0f, 18.0f,
6241         19.0f, 20.0f,
6242         21.0f, 22.0f,
6243
6244         9.0f, 10.0f,
6245         11.0f, 12.0f,
6246         23.0f, 24.0f,
6247         25.0f, 26.0f,
6248         27.0f, 28.0f
6249     }));
6250
6251     return result;
6252 }
6253
6254 LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
6255     armnn::IWorkloadFactory& workloadFactory,
6256     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6257 {
6258     return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
6259         workloadFactory, memoryManager, 0.0f, 0);
6260 }
6261
6262 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6263 LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
6264     armnn::IWorkloadFactory& workloadFactory,
6265     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6266     float qScale,
6267     int32_t qOffset,
6268     bool useSubtensor)
6269 {
6270     unsigned int dimension = 3;
6271     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6272
6273     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6274         1.0f, 2.0f,
6275         3.0f, 4.0f,
6276         5.0f, 6.0f,
6277         7.0f, 8.0f,
6278         9.0f, 10.0f,
6279         11.0f, 12.0f
6280     }));
6281
6282     armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
6283
6284     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6285         11.0f, 12.0f, 13.0f,
6286         14.0f, 15.0f, 16.0f,
6287
6288         17.0f, 18.0f, 19.0f,
6289         20.0f, 21.0f, 22.0f,
6290
6291         23.0f, 24.0f, 25.0f,
6292         26.0f, 27.0f, 28.0f
6293     }));
6294
6295     armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
6296
6297     LayerTestResult<T, 4> result(outputTensorInfo);
6298
6299     std::vector<T> output;
6300     output.resize(outputTensorInfo.GetNumElements());
6301     Concatenate<T>(workloadFactory,
6302                    memoryManager,
6303                    {inputTensorInfo0, inputTensorInfo1},
6304                    {input0.data(), input1.data()},
6305                    outputTensorInfo,
6306                    output.data(),
6307                    dimension,
6308                    useSubtensor);
6309
6310     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6311     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6312         1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
6313         3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
6314         5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
6315         7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
6316         9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
6317         11.0f, 12.0f, 26.0f, 27.0f, 28.0f
6318     }));
6319
6320     return result;
6321 }
6322
6323 LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
6324     armnn::IWorkloadFactory& workloadFactory,
6325     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6326     bool useSubtensor)
6327 {
6328     return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
6329         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
6330 }
6331
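// Runs a FakeQuantization workload over a 3x2 float tensor with the range [-10, 10];
// the expected mapping is described alongside the expected values below.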
6332 LayerTestResult<float, 2> FakeQuantizationTest(
6333     armnn::IWorkloadFactory& workloadFactory,
6334     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6335 {
6336     constexpr unsigned int width = 2;
6337     constexpr unsigned int height = 3;
6338
6339     const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
6341     auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6342        -10.0f,  -5.0f,
6343          0.0f,   5.0f,
6344         10.0f,  10.0f
6345     }));
6346
6347     LayerTestResult<float, 2> ret(tensorInfo);
6348
6349     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6350
6351     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6352
6353     armnn::FakeQuantizationQueueDescriptor data;
6354     armnn::WorkloadInfo info;
6355
6356     AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
6357     AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
6358     float min = -10.f;
6359     float max = 10.f;
6360
6361     data.m_Parameters.m_Min = min;
6362     data.m_Parameters.m_Max = max;
6363
6364     armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
6365     armnn::FakeQuantizationQueueDescriptor refData = data;
6366     armnn::WorkloadInfo refInfo = info;
6367     SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
6368
6369     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
6370
6371     inputHandle->Allocate();
6372     outputHandle->Allocate();
6373
6374     CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
6375
6376     workload->PostAllocationConfigure();
6377     workload->Execute();
6378
6379     CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
6380
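    // The workload maps inputs linearly from [min, max] = [-10, 10] onto the
    // quantized range [0, 255] and writes the results back as floats, so -10.0f
    // maps to 0.0f and 10.0f maps to 255.0f.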
6381     ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6382         0.0f,     63.0f,
6383         128.0f,   191.0f,
6384         255.0f,   255.0f
6385     }));
6386     return ret;
6387 }
6388
6389 namespace
6390 {
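// L2 normalization divides each element by the L2 norm taken across the channel
// dimension: out(n, c, h, w) = in(n, c, h, w) / sqrt(sum over c' of in(n, c', h, w)^2).
// The epsilon parameter bounds the denominator from below for near-zero inputs
// (see L2NormalizationEpsilonTestCommon, where the expected scale is 1/sqrt(epsilon)).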
6391 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6392 LayerTestResult<T, 4> L2NormalizationTestImpl(
6393     armnn::IWorkloadFactory& workloadFactory,
6394     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6395     const armnn::TensorShape& inputOutputTensorShape,
6396     float scale,
6397     int32_t offset,
6398     const std::vector<float>& inputValues,
6399     float outScale,
6400     int32_t outOffset,
6401     const std::vector<float>& expectedOutputValues,
6402     const armnn::DataLayout layout,
6403     float epsilon = 1e-12f)
6404 {
6405     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
6406     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
6407
6408     // At this point, permute the input data from NCHW to NHWC if that layout was requested.
6409     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
6410     std::vector<float> inputData = inputValues;
6411     if (layout == armnn::DataLayout::NHWC)
6412     {
6413         std::vector<float> tmp(inputData.size());
6414         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
6415         inputData = tmp;
6416     }
6417
6418     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
6419                                                          inputTensorInfo.GetQuantizationScale(),
6420                                                          inputTensorInfo.GetQuantizationOffset(),
6421                                                          inputData));
6422
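    // The expected values are authored in NCHW order, so permute them the same way.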
6423     std::vector<float> expectedOutputData = expectedOutputValues;
6424     if (layout == armnn::DataLayout::NHWC)
6425     {
6426         std::vector<float> tmp(expectedOutputData.size());
6427         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
6428                             sizeof(float));
6429         expectedOutputData = tmp;
6430     }
6431
6432     LayerTestResult<T, 4> result(outputTensorInfo);
6433     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
6434                                                                outputTensorInfo.GetQuantizationScale(),
6435                                                                outputTensorInfo.GetQuantizationOffset(),
6436                                                                expectedOutputData));
6437
6438     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6439     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6440
6441     armnn::L2NormalizationQueueDescriptor descriptor;
6442     descriptor.m_Parameters.m_Eps = epsilon;
6443     descriptor.m_Parameters.m_DataLayout = layout;
6444     armnn::WorkloadInfo info;
6445
6446     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6447     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6448
6449     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
6450
6451     inputHandle->Allocate();
6452     outputHandle->Allocate();
6453
6454     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6455
6456     workload->PostAllocationConfigure();
6457     ExecuteWorkload(*workload, memoryManager);
6458
6459     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6460
6461     return result;
6462 }
6463
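// Returns 1 / ||elements||_2, used to compute expected L2 normalization outputs by hand.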
6464 float CalcInvL2Norm(std::initializer_list<float> elements)
6465 {
6466     const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
6467         [](float acc, float element) { return acc + element * element; });
6468     return 1.0f / sqrtf(reduction);
6469 }
6470
6471 } // anonymous namespace
6472
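// Pads a 3x3 input with two elements of customPaddingValue on every side,
// giving the 7x7 output laid out below.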
6473 template<armnn::DataType ArmnnType, typename T>
6474 LayerTestResult<T, 2> Pad2dTestCommon(
6475     armnn::IWorkloadFactory& workloadFactory,
6476     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6477     float qScale,
6478     int32_t qOffset,
6479     const float customPaddingValue)
6480 {
6481     const armnn::TensorShape inputShape{ 3, 3 };
6482     const armnn::TensorShape outputShape{ 7, 7 };
6483
6484     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6485     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
6486
6487     std::vector<T> inputValues(
6488     QuantizedVector<T>(qScale, qOffset,
6489     {
6490       // Height (3) x Width (3)
6491       4, 8, 6,
6492       7, 4, 4,
6493       3, 2, 4
6494     }));
6495
6496     const float p = customPaddingValue;
6497     std::vector<T> expectedOutputValues(
6499     QuantizedVector<T>(qScale, qOffset,
6500     {
6501       p, p, p, p, p, p, p,
6502       p, p, p, p, p, p, p,
6503       p, p, 4, 8, 6, p, p,
6504       p, p, 7, 4, 4, p, p,
6505       p, p, 3, 2, 4, p, p,
6506       p, p, p, p, p, p, p,
6507       p, p, p, p, p, p, p
6508     }));
6509
6510     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
6511
6512     LayerTestResult<T, 2> result(outputTensorInfo);
6513     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
6514
6515     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6516     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6517
6518     armnn::PadQueueDescriptor descriptor;
6519
6520     std::vector<std::pair<unsigned int, unsigned int>> padList;
6521     padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6522     padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6523
6524     descriptor.m_Parameters.m_PadList = padList;
6525     descriptor.m_Parameters.m_PadValue = customPaddingValue;
6526     armnn::WorkloadInfo info;
6527
6528     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6529     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6530
6531     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6532
6533     inputHandle->Allocate();
6534     outputHandle->Allocate();
6535
6536     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
6537
6538     workload->PostAllocationConfigure();
6539     workload->Execute();
6540
6541     CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
6542
6543     return result;
6544 }
6545
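// Zero-pads a { 2, 2, 2 } input with (0,1), (2,1) and (2,2) elements before/after
// each dimension, giving a { 3, 5, 6 } output.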
6546 template<armnn::DataType ArmnnType, typename T>
6547 LayerTestResult<T, 3> Pad3dTestCommon(
6548     armnn::IWorkloadFactory& workloadFactory,
6549     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6550     float qScale,
6551     int32_t qOffset)
6552 {
6553     const armnn::TensorShape inputShape{ 2, 2, 2 };
6554     const armnn::TensorShape outputShape{ 3, 5, 6 };
6555
6556     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6557     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
6558
6559     std::vector<T> inputValues(
6560       QuantizedVector<T>(qScale, qOffset,
6561     {
6562         // Channel 0, Height (2) x Width (2)
6563         0, 4,
6564         2, 5,
6565
6566         // Channel 1, Height (2) x Width (2)
6567         6, 1,
6568         5, 2
6569     }));
6570
6571     std::vector<T> expectedOutputValues(
6572       QuantizedVector<T>(qScale, qOffset,
6573     {
6575         0, 0, 0, 0, 0, 0,
6576         0, 0, 0, 0, 0, 0,
6577         0, 0, 0, 4, 0, 0,
6578         0, 0, 2, 5, 0, 0,
6579         0, 0, 0, 0, 0, 0,
6580
6581         0, 0, 0, 0, 0, 0,
6582         0, 0, 0, 0, 0, 0,
6583         0, 0, 6, 1, 0, 0,
6584         0, 0, 5, 2, 0, 0,
6585         0, 0, 0, 0, 0, 0,
6586
6587         0, 0, 0, 0, 0, 0,
6588         0, 0, 0, 0, 0, 0,
6589         0, 0, 0, 0, 0, 0,
6590         0, 0, 0, 0, 0, 0,
6591         0, 0, 0, 0, 0, 0
6593     }));
6594
6595     auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
6596
6597     LayerTestResult<T, 3> result(outputTensorInfo);
6598     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
6599
6600     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6601     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6602
6603     armnn::PadQueueDescriptor descriptor;
6604
6605     std::vector<std::pair<unsigned int, unsigned int>> padList;
6606     padList.push_back(std::pair<unsigned int, unsigned int>(0,1));
6607     padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6608     padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6609
6610     descriptor.m_Parameters.m_PadList = padList;
6611     armnn::WorkloadInfo info;
6612
6613     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6614     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6615
6616     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6617
6618     inputHandle->Allocate();
6619     outputHandle->Allocate();
6620
6621     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
6622
6623     workload->PostAllocationConfigure();
6624     workload->Execute();
6625
6626     CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
6627
6628     return result;
6629 }
6630
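// Zero-pads a { 2, 2, 3, 2 } input with (1,1), (2,1), (3,1) and (1,1) elements
// before/after each dimension, giving a { 4, 5, 7, 4 } output.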
6631 template<armnn::DataType ArmnnType, typename T>
6632 LayerTestResult<T, 4> Pad4dTestCommon(
6633     armnn::IWorkloadFactory& workloadFactory,
6634     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6635     float qScale,
6636     int32_t qOffset)
6637 {
6638     const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6639     const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6640
6641     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6642     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
6643
6644     std::vector<T> inputValues(
6645       QuantizedVector<T>(qScale, qOffset,
6646     {
6647         // Batch 0, Channel 0, Height (3) x Width (2)
6648         0, 1,
6649         2, 3,
6650         4, 5,
6651
6652         // Batch 0, Channel 1, Height (3) x Width (2)
6653         6, 7,
6654         8, 9,
6655         10, 11,
6656
6657         // Batch 1, Channel 0, Height (3) x Width (2)
6658         12, 13,
6659         14, 15,
6660         16, 17,
6661
6662         // Batch 1, Channel 1, Height (3) x Width (2)
6663         18, 19,
6664         20, 21,
6665         22, 23
6666     }));
6667
6668     std::vector<T> expectedOutputValues(
6669       QuantizedVector<T>(qScale, qOffset,
6670     {
6671         0, 0, 0, 0,
6672         0, 0, 0, 0,
6673         0, 0, 0, 0,
6674         0, 0, 0, 0,
6675         0, 0, 0, 0,
6676         0, 0, 0, 0,
6677         0, 0, 0, 0,
6678
6679         0, 0, 0, 0,
6680         0, 0, 0, 0,
6681         0, 0, 0, 0,
6682         0, 0, 0, 0,
6683         0, 0, 0, 0,
6684         0, 0, 0, 0,
6685         0, 0, 0, 0,
6686
6687         0, 0, 0, 0,
6688         0, 0, 0, 0,
6689         0, 0, 0, 0,
6690         0, 0, 0, 0,
6691         0, 0, 0, 0,
6692         0, 0, 0, 0,
6693         0, 0, 0, 0,
6694
6695         0, 0, 0, 0,
6696         0, 0, 0, 0,
6697         0, 0, 0, 0,
6698         0, 0, 0, 0,
6699         0, 0, 0, 0,
6700         0, 0, 0, 0,
6701         0, 0, 0, 0,
6702
6703         0, 0, 0, 0,
6704         0, 0, 0, 0,
6705         0, 0, 0, 0,
6706         0, 0, 0, 0,
6707         0, 0, 0, 0,
6708         0, 0, 0, 0,
6709         0, 0, 0, 0,
6710
6711         0, 0, 0, 0,
6712         0, 0, 0, 0,
6713         0, 0, 0, 0,
6714         0, 0, 0, 0,
6715         0, 0, 0, 0,
6716         0, 0, 0, 0,
6717         0, 0, 0, 0,
6718
6719         0, 0, 0, 0,
6720         0, 0, 0, 0,
6721         0, 0, 0, 0,
6722         0, 0, 0, 0,
6723         0, 0, 0, 0,
6724         0, 0, 0, 0,
6725         0, 0, 0, 0,
6726
6727         0, 0, 0, 0,
6728         0, 0, 0, 0,
6729         0, 0, 0, 0,
6730         0, 0, 1, 0,
6731         0, 2, 3, 0,
6732         0, 4, 5, 0,
6733         0, 0, 0, 0,
6734
6735         0, 0, 0, 0,
6736         0, 0, 0, 0,
6737         0, 0, 0, 0,
6738         0, 6, 7, 0,
6739         0, 8, 9, 0,
6740         0, 10, 11, 0,
6741         0, 0, 0, 0,
6742
6743         0, 0, 0, 0,
6744         0, 0, 0, 0,
6745         0, 0, 0, 0,
6746         0, 0, 0, 0,
6747         0, 0, 0, 0,
6748         0, 0, 0, 0,
6749         0, 0, 0, 0,
6750
6751         0, 0, 0, 0,
6752         0, 0, 0, 0,
6753         0, 0, 0, 0,
6754         0, 0, 0, 0,
6755         0, 0, 0, 0,
6756         0, 0, 0, 0,
6757         0, 0, 0, 0,
6758
6759         0, 0, 0, 0,
6760         0, 0, 0, 0,
6761         0, 0, 0, 0,
6762         0, 0, 0, 0,
6763         0, 0, 0, 0,
6764         0, 0, 0, 0,
6765         0, 0, 0, 0,
6766
6767         0, 0, 0, 0,
6768         0, 0, 0, 0,
6769         0, 0, 0, 0,
6770         0, 12, 13, 0,
6771         0, 14, 15, 0,
6772         0, 16, 17, 0,
6773         0, 0, 0, 0,
6774
6775         0, 0, 0, 0,
6776         0, 0, 0, 0,
6777         0, 0, 0, 0,
6778         0, 18, 19, 0,
6779         0, 20, 21, 0,
6780         0, 22, 23, 0,
6781         0, 0, 0, 0,
6782
6783         0, 0, 0, 0,
6784         0, 0, 0, 0,
6785         0, 0, 0, 0,
6786         0, 0, 0, 0,
6787         0, 0, 0, 0,
6788         0, 0, 0, 0,
6789         0, 0, 0, 0,
6790
6791         0, 0, 0, 0,
6792         0, 0, 0, 0,
6793         0, 0, 0, 0,
6794         0, 0, 0, 0,
6795         0, 0, 0, 0,
6796         0, 0, 0, 0,
6797         0, 0, 0, 0,
6798
6799         0, 0, 0, 0,
6800         0, 0, 0, 0,
6801         0, 0, 0, 0,
6802         0, 0, 0, 0,
6803         0, 0, 0, 0,
6804         0, 0, 0, 0,
6805         0, 0, 0, 0,
6806
6807         0, 0, 0, 0,
6808         0, 0, 0, 0,
6809         0, 0, 0, 0,
6810         0, 0, 0, 0,
6811         0, 0, 0, 0,
6812         0, 0, 0, 0,
6813         0, 0, 0, 0,
6814
6815         0, 0, 0, 0,
6816         0, 0, 0, 0,
6817         0, 0, 0, 0,
6818         0, 0, 0, 0,
6819         0, 0, 0, 0,
6820         0, 0, 0, 0,
6821         0, 0, 0, 0,
6822
6823         0, 0, 0, 0,
6824         0, 0, 0, 0,
6825         0, 0, 0, 0,
6826         0, 0, 0, 0,
6827         0, 0, 0, 0,
6828         0, 0, 0, 0,
6829         0, 0, 0, 0
6830     }));
6831
6832     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
6833
6834     LayerTestResult<T, 4> result(outputTensorInfo);
6835     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
6836
6837     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6838     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6839
6840     armnn::PadQueueDescriptor descriptor;
6841
6842     std::vector<std::pair<unsigned int, unsigned int>> padList;
6843     padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6844     padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6845     padList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6846     padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6847
6848     descriptor.m_Parameters.m_PadList = padList;
6849     armnn::WorkloadInfo info;
6850
6851     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6852     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6853
6854     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6855
6856     inputHandle->Allocate();
6857     outputHandle->Allocate();
6858
6859     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6860
6861     workload->PostAllocationConfigure();
6862     workload->Execute();
6863
6864     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6865
6866     return result;
6867 }
6868
6869 LayerTestResult<uint8_t, 2> PadUint82dTest(
6870     armnn::IWorkloadFactory& workloadFactory,
6871     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6872 {
6873     return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
6874 }
6875
6876 LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6877     armnn::IWorkloadFactory& workloadFactory,
6878     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6879 {
6880     return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
6881 }
6882
6883 LayerTestResult<uint8_t, 3> PadUint83dTest(
6884     armnn::IWorkloadFactory& workloadFactory,
6885     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6886 {
6887     return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
6888 }
6889
6890 LayerTestResult<uint8_t, 4> PadUint84dTest(
6891     armnn::IWorkloadFactory& workloadFactory,
6892     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6893 {
6894     return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
6895 }
6896
6898 template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
6899 Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
6900     armnn::IWorkloadFactory& workloadFactory,
6901     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6902     float qScale,
6903     int32_t qOffset,
6904     const float customPaddingValue);
6905
6906 template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
6907 Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
6908     armnn::IWorkloadFactory& workloadFactory,
6909     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6910     float qScale,
6911     int32_t qOffset);
6912
6913 template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
6914 Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
6915     armnn::IWorkloadFactory& workloadFactory,
6916     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6917     float qScale,
6918     int32_t qOffset);
6919
6920 LayerTestResult<float, 2> PadFloat322dTest(
6921     armnn::IWorkloadFactory& workloadFactory,
6922     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6923 {
6924     return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6925 }
6926
6927 LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6928     armnn::IWorkloadFactory& workloadFactory,
6929     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6930 {
6931     return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
6932 }
6933
6934 LayerTestResult<float, 3> PadFloat323dTest(
6935     armnn::IWorkloadFactory& workloadFactory,
6936     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6937 {
6938     return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6939 }
6940
6941 LayerTestResult<float, 4> PadFloat324dTest(
6942     armnn::IWorkloadFactory& workloadFactory,
6943     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6944 {
6945     return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6946 }
6947
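// Feeds inputs whose sum of squares is far below epsilon, so the denominator is
// clamped to sqrt(epsilon) and every output equals input * 1/sqrt(epsilon).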
6948 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6949 LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6950         armnn::IWorkloadFactory& workloadFactory,
6951         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6952         float scale,
6953         int32_t offset,
6954         float outScale,
6955         int32_t outOffset,
6956         const armnn::DataLayout layout,
6957         float epsilon)
6958 {
6959     // Width: 1
6960     // Height: 1
6961     // Channels: 3
6962     // BatchSize: 1
6963     unsigned int numberOfBatches = 1;
6964     unsigned int numberOfChannels = 3;
6965     unsigned int height = 1;
6966     unsigned int width = 1;
6967
6968     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6969             numberOfBatches, numberOfChannels, height, width, layout);
6970
6971     // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12, i.e. the sum of squares is below the default epsilon.
6972     std::vector<float> inputValues
6973     {
6974         // Batch 0, Channel 0, Height (1) x Width (1)
6975         0.00000001f,
6976
6977         // Batch 0, Channel 1, Height (1) x Width (1)
6978         0.00000002f,
6979
6980         // Batch 0, Channel 2, Height (1) x Width (1)
6981         0.00000003f,
6982     };
6983
6984     const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6985     std::vector<float> expectedOutputValues
6986     {
6987         // Batch 0, Channel 0, Height (1) x Width (1)
6988         0.00000001f * approxInvL2Norm,
6989         0.00000002f * approxInvL2Norm,
6990         0.00000003f * approxInvL2Norm,
6991     };
6992
6993     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6994                                               inputValues, outScale, outOffset, expectedOutputValues, layout,
6995                                               epsilon);
6996 }
6998
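// Normalizes a single 10-channel pixel; every output is the corresponding input
// scaled by the same inverse L2 norm.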
6999 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7000 LayerTestResult<T, 4> L2Normalization1dTestCommon(
7001         armnn::IWorkloadFactory& workloadFactory,
7002         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7003         float scale,
7004         int32_t offset,
7005         float outScale,
7006         int32_t outOffset,
7007         const armnn::DataLayout layout)
7008 {
7009     // Width: 1
7010     // Height: 1
7011     // Channels: 10
7012     // BatchSize: 1
7013     unsigned int numberOfBatches = 1;
7014     unsigned int numberOfChannels = 10;
7015     unsigned int height = 1;
7016     unsigned int width = 1;
7017
7018
7019     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7020             numberOfBatches, numberOfChannels, height, width, layout);
7021     std::vector<float> inputValues
7022     {
7023         // Batch 0, Channel 0, Height (1) x Width (1)
7024         1.0f,
7025
7026         // Batch 0, Channel 1, Height (1) x Width (1)
7027         2.0f,
7028
7029         // Batch 0, Channel 2, Height (1) x Width (1)
7030         3.0f,
7031
7032         // Batch 0, Channel 3, Height (1) x Width (1)
7033         4.0f,
7034
7035         // Batch 0, Channel 4, Height (1) x Width (1)
7036         5.0f,
7037
7038         // Batch 0, Channel 5, Height (1) x Width (1)
7039         6.0f,
7040
7041         // Batch 0, Channel 6, Height (1) x Width (1)
7042         7.0f,
7043
7044         // Batch 0, Channel 7, Height (1) x Width (1)
7045         8.0f,
7046
7047         // Batch 0, Channel 8, Height (1) x Width (1)
7048         9.0f,
7049
7050         // Batch 0, Channel 9, Height (1) x Width (1)
7051         10.0f
7052     };
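         // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719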
7053     const float approxInvL2Norm = 0.050964719f;
7054     std::vector<float> expectedOutputValues
7055     {
7056         // Batch 0, Channel 0, Height (1) x Width (1)
7057         1.0f * approxInvL2Norm,
7058         2.0f * approxInvL2Norm,
7059         3.0f * approxInvL2Norm,
7060         4.0f * approxInvL2Norm,
7061         5.0f * approxInvL2Norm,
7062         6.0f * approxInvL2Norm,
7063         7.0f * approxInvL2Norm,
7064         8.0f * approxInvL2Norm,
7065         9.0f * approxInvL2Norm,
7066         10.0f * approxInvL2Norm
7067     };
7068
7069
7070     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7071                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7072 }
7073
7074 LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
7075         armnn::IWorkloadFactory& workloadFactory,
7076         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7077         const armnn::DataLayout layout)
7078 {
7079     // Dummy descriptor to get the default value of epsilon.
7080     armnn::L2NormalizationDescriptor descriptor;
7081
7082     return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7083                                                                       layout, descriptor.m_Eps);
7084 }
7085
7086 LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
7087         armnn::IWorkloadFactory& workloadFactory,
7088         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7089         const armnn::DataLayout layout)
7090 {
7091     return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7092                                                                       layout, 1e-9f);
7093 }
7094
7095 LayerTestResult<float, 4> L2Normalization1dTest(
7096     armnn::IWorkloadFactory& workloadFactory,
7097     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7098     const armnn::DataLayout layout)
7099 {
7100     return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0, layout);
7101 }
7102
7103 LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
7104     armnn::IWorkloadFactory& workloadFactory,
7105     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7106     const armnn::DataLayout layout)
7107 {
7108     return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7109                                                                          layout);
7110 }
7111
7112 LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
7113     armnn::IWorkloadFactory& workloadFactory,
7114     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7115     const armnn::DataLayout layout)
7116 {
7117     return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7118                                                                          1.f/128, 128, layout);
7119 }
7120
7121 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7122 LayerTestResult<T, 4> L2Normalization2dTestCommon(
7123     armnn::IWorkloadFactory& workloadFactory,
7124     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7125     float scale,
7126     int32_t offset,
7127     float outScale,
7128     int32_t outOffset,
7129     const armnn::DataLayout layout)
7130 {
7131     // Width: 5
7132     // Height: 1
7133     // Channels: 2
7134     // BatchSize: 1
7135     unsigned int numberOfBatches = 1;
7136     unsigned int numberOfChannels = 2;
7137     unsigned int height = 1;
7138     unsigned int width = 5;
7139
7140     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7141             numberOfBatches, numberOfChannels, height, width, layout);
7142     std::vector<float> inputValues
7143     {
7144         // Batch 0, Channel 0, Height (1) x Width (5)
7145         1.0f, 3.0f, 5.0f, 7.0f,  9.0f,
7146
7147         // Batch 0, Channel 1, Height (1) x Width (5)
7148         2.0f, 4.0f, 6.0f, 8.0f, 10.0f
7149     };
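         // Each element is scaled by the inverse L2 norm of its channel vector:
         // out[n][c][h][w] = in[n][c][h][w] / sqrt(sum over c of in[n][c][h][w]^2).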
7150     std::vector<float> expectedOutputValues
7151     {
7152         // Batch 0, Channel 0, Height (1) x Width (5)
7153         1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7154         3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7155         5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7156         7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7157         9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
7158
7159         // Batch 0, Channel 1, Height (1) x Width (5)
7160         2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7161         4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7162         6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7163         8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7164         10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
7165     };
7166
7167     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7168                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7169 }
7170
7171 LayerTestResult<float, 4> L2Normalization2dTest(
7172     armnn::IWorkloadFactory& workloadFactory,
7173     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7174     const armnn::DataLayout layout)
7175 {
7176     return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7177                                                                  layout);
7178 }
7179
7180 LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
7181     armnn::IWorkloadFactory& workloadFactory,
7182     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7183     const armnn::DataLayout layout)
7184 {
7185     return L2Normalization2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7186                                                                          layout);
7187 }
7188
7189 LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
7190     armnn::IWorkloadFactory& workloadFactory,
7191     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7192     const armnn::DataLayout layout)
7193 {
7194     return L2Normalization2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7195                                                                          1.f/128, 128, layout);
7196 }
7197
7198 LayerTestResult<float, 2> L2Normalization2dShapeTest(
7199     armnn::IWorkloadFactory& workloadFactory,
7200     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7201 {
7202     const armnn::DataLayout layout = armnn::DataLayout::NHWC;
7203     const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });
7204
7205     std::vector<float> inputData
7206     {
7207         1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
7208     };
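         // With a rank-2 shape of { 5, 2 } in NHWC, the last dimension acts as the channel
         // dimension, so each consecutive pair of values is normalized together.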
7209     std::vector<float> expectedOutputData
7210     {
7211         1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7212         2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7213         3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7214         4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7215         5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7216         6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7217         7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7218         8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7219         9.0f  * CalcInvL2Norm({ 9.0f, 10.0f }),
7220         10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
7221     };
7222
7223     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
7224     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
7225
7226     auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
7227                                                              inputTensorInfo.GetQuantizationScale(),
7228                                                              inputTensorInfo.GetQuantizationOffset(),
7229                                                              inputData));
7230
7231     LayerTestResult<float, 2> result(outputTensorInfo);
7232     result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
7233                                                                    outputTensorInfo.GetQuantizationScale(),
7234                                                                    outputTensorInfo.GetQuantizationOffset(),
7235                                                                    expectedOutputData));
7236
7237     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7238     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7239
7240     armnn::L2NormalizationQueueDescriptor descriptor;
7241     descriptor.m_Parameters.m_Eps = 1e-12f;
7242     descriptor.m_Parameters.m_DataLayout = layout;
7243     armnn::WorkloadInfo info;
7244
7245     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7246     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7247
7248     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
7249
7250     inputHandle->Allocate();
7251     outputHandle->Allocate();
7252
7253     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
7254
7255     workload->PostAllocationConfigure();
7256     ExecuteWorkload(*workload, memoryManager);
7257
7258     CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
7259
7260     return result;
7261 }
7262
7263 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7264 LayerTestResult<T, 4> L2Normalization3dTestCommon(
7265     armnn::IWorkloadFactory& workloadFactory,
7266     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7267     float scale,
7268     int32_t offset,
7269     float outScale,
7270     int32_t outOffset,
7271     const armnn::DataLayout layout)
7272 {
7273     // Width: 3
7274     // Height: 4
7275     // Channels: 2
7276     // BatchSize: 1
7277     unsigned int numberOfBatches = 1;
7278     unsigned int numberOfChannels = 2;
7279     unsigned int height = 4;
7280     unsigned int width = 3;
7281
7282     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7283             numberOfBatches, numberOfChannels, height, width, layout);
7284     std::vector<float> inputValues
7285     {
7286         // Batch 0, Channel 0, Height (4) x Width (3)
7287         119.0f,  21.0f, 150.0f,
7288         149.0f,  32.0f, 179.0f,
7289          15.0f, 227.0f, 141.0f,
7290         147.0f, 199.0f, 220.0f,
7291
7292         // Batch 0, Channel 1, Height (4) x Width (3)
7293         110.0f, 140.0f,  73.0f,
7294         211.0f, 212.0f,  89.0f,
7295          24.0f, 138.0f, 188.0f,
7296         162.0f,  12.0f, 161.0f
7297     };
7298     std::vector<float> expectedOutputValues
7299     {
7300         // Batch 0, Channel 0, Height (4) x Width (3)
7301         119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
7302          21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
7303         150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
7304         149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
7305          32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
7306         179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
7307          15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
7308         227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
7309         141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
7310         147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
7311         199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
7312         220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
7313
7314         // Batch 0, Channel 1, Height (4) x Width (3)
7315         110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
7316         140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
7317          73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
7318         211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
7319         212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
7320          89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
7321          24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
7322         138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
7323         188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
7324         162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
7325          12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
7326         161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
7327     };
7328
7329     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7330                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7331 }
7332
7333 LayerTestResult<float, 4> L2Normalization3dTest(
7334     armnn::IWorkloadFactory& workloadFactory,
7335     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7336     const armnn::DataLayout layout)
7337 {
7338     return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7339                                                                  layout);
7340 }
7341
7342 LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
7343     armnn::IWorkloadFactory& workloadFactory,
7344     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7345     const armnn::DataLayout layout)
7346 {
7347     return L2Normalization3dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7348                                                                          layout);
7349 }
7350
7351 LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
7352     armnn::IWorkloadFactory& workloadFactory,
7353     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7354     const armnn::DataLayout layout)
7355 {
7356     return L2Normalization3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7357                                                                          1.f/128, 128, layout);
7358 }
7359
7360 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7361 LayerTestResult<T, 4> L2Normalization4dTestCommon(
7362     armnn::IWorkloadFactory& workloadFactory,
7363     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7364     float scale,
7365     int32_t offset,
7366     float outScale,
7367     int32_t outOffset,
7368     const armnn::DataLayout layout)
7369 {
7370     // Width: 3
7371     // Height: 4
7372     // Channels: 3
7373     // BatchSize: 2
7374     unsigned int numberOfBatches = 2;
7375     unsigned int numberOfChannels = 3;
7376     unsigned int height = 4;
7377     unsigned int width = 3;
7378
7379     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7380             numberOfBatches, numberOfChannels, height, width, layout);
7381     std::vector<float> inputValues
7382     {
7383         // Batch 0, Channel 0, Height (4) x Width (3)
7384         235.0f,  46.0f, 178.0f,
7385         100.0f, 123.0f,  19.0f,
7386         172.0f,  74.0f, 250.0f,
7387           6.0f, 195.0f,  80.0f,
7388
7389         // Batch 0, Channel 1, Height (4) x Width (3)
7390         113.0f,  95.0f, 202.0f,
7391          77.0f, 114.0f,  71.0f,
7392         122.0f, 246.0f, 166.0f,
7393          82.0f,  28.0f,  37.0f,
7394
7395         // Batch 0, Channel 2, Height (4) x Width (3)
7396          56.0f, 170.0f, 162.0f,
7397         194.0f,  89.0f, 254.0f,
7398          12.0f, 209.0f, 200.0f,
7399           1.0f,  64.0f,  54.0f,
7400
7401         // Batch 1, Channel 0, Height (4) x Width (3)
7402          67.0f,  90.0f,  49.0f,
7403           7.0f, 163.0f,  18.0f,
7404          25.0f, 117.0f, 103.0f,
7405         247.0f,  59.0f, 189.0f,
7406
7407         // Batch 1, Channel 1, Height (4) x Width (3)
7408         239.0f, 104.0f, 199.0f,
7409          17.0f, 124.0f, 153.0f,
7410         222.0f, 217.0f,  75.0f,
7411          32.0f, 126.0f,  21.0f,
7412
7413         // Batch 1, Channel 2, Height (4) x Width (3)
7414          97.0f, 145.0f, 215.0f,
7415         115.0f, 116.0f, 238.0f,
7416         226.0f,  16.0f, 132.0f,
7417          92.0f, 125.0f,  88.0f
7418     };
7419     std::vector<float> expectedOutputValues
7420     {
7421         // Batch 0, Channel 0, Height (4) x Width (3)
7422         235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
7423          46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
7424         178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
7425         100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
7426         123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
7427          19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
7428         172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
7429          74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
7430         250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
7431           6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
7432         195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
7433          80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
7434
7435         // Batch 0, Channel 1, Height (4) x Width (3)
7436         113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
7437          95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
7438         202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
7439          77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
7440         114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
7441          71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
7442         122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
7443         246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
7444         166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
7445          82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
7446          28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
7447          37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
7448
7449         // Batch 0, Channel 2, Height (4) x Width (3)
7450          56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
7451         170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
7452         162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
7453         194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
7454          89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
7455         254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
7456          12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
7457         209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
7458         200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
7459           1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
7460          64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
7461          54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
7462
7463         // Batch 1, Channel 0, Height (4) x Width (3)
7464          67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
7465          90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
7466          49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
7467           7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
7468         163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7469          18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
7470          25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
7471         117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
7472         103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
7473         247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
7474          59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
7475         189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
7476
7477         // Batch 1, Channel 1, Height (4) x Width (3)
7478         239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
7479         104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
7480         199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
7481          17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
7482         124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7483         153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
7484         222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
7485         217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
7486          75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
7487          32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
7488         126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
7489          21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
7490
7491         // Batch 1, Channel 2, Height (4) x Width (3)
7492          97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
7493         145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
7494         215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
7495         115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
7496         116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7497         238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
7498         226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
7499          16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
7500         132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
7501          92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
7502         125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
7503          88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
7504     };
7505
7506     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7507                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7508 }
7509
7510 LayerTestResult<float, 4> L2Normalization4dTest(
7511     armnn::IWorkloadFactory& workloadFactory,
7512     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7513     const armnn::DataLayout layout)
7514 {
7515     return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7516                                                                  layout);
7517 }
7518
7519 LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7520     armnn::IWorkloadFactory& workloadFactory,
7521     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7522     const armnn::DataLayout layout)
7523 {
7524     return L2Normalization4dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7525                                                                          layout);
7526 }
7527
7528 LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7529     armnn::IWorkloadFactory& workloadFactory,
7530     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7531     const armnn::DataLayout layout)
7532 {
7533     return L2Normalization4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7534                                                                          1.f/128, 128, layout);
7535 }
7536
7537 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7538 LayerTestResult<T, 4> ConstantTestImpl(
7539     armnn::IWorkloadFactory& workloadFactory,
7540     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7541     float qScale,
7542     int32_t qOffset)
7543 {
7544     constexpr unsigned int inputWidth = 3;
7545     constexpr unsigned int inputHeight = 4;
7546     constexpr unsigned int inputChannels = 3;
7547     constexpr unsigned int inputBatchSize = 2;
7548
7549     constexpr unsigned int outputWidth = inputWidth;
7550     constexpr unsigned int outputHeight = inputHeight;
7551     constexpr unsigned int outputChannels = inputChannels;
7552     constexpr unsigned int outputBatchSize = inputBatchSize;
7553
7554     armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7555                                         ArmnnType, qScale, qOffset);
7556
7557     armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7558                                          ArmnnType, qScale, qOffset);
7559
7560     // Set quantization parameters if the requested type is a quantized type.
7561     if(armnn::IsQuantizedType<T>())
7562     {
7563         inputTensorInfo.SetQuantizationScale(qScale);
7564         inputTensorInfo.SetQuantizationOffset(qOffset);
7565         outputTensorInfo.SetQuantizationScale(qScale);
7566         outputTensorInfo.SetQuantizationOffset(qOffset);
7567     }
7568
7569     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
7570         QuantizedVector<T>(qScale, qOffset, {
7571         // Batch 0, Channel 0
7572         235.0f,  46.0f, 178.0f,
7573         100.0f, 123.0f,  19.0f,
7574         172.0f,  74.0f, 250.0f,
7575           6.0f, 195.0f,  80.0f,
7576
7577         // Batch 0, Channel 1
7578         113.0f,  95.0f, 202.0f,
7579          77.0f, 114.0f,  71.0f,
7580         122.0f, 246.0f, 166.0f,
7581          82.0f,  28.0f,  37.0f,
7582
7583         // Batch 0, Channel 2
7584          56.0f, 170.0f, 162.0f,
7585         194.0f,  89.0f, 254.0f,
7586          12.0f, 209.0f, 200.0f,
7587           1.0f,  64.0f,  54.0f,
7588
7589         // Batch 1, Channel 0
7590          67.0f,  90.0f,  49.0f,
7591           7.0f, 163.0f,  18.0f,
7592          25.0f, 117.0f, 103.0f,
7593         247.0f,  59.0f, 189.0f,
7594
7595         // Batch 1, Channel 1
7596         239.0f, 104.0f, 199.0f,
7597          17.0f, 124.0f, 153.0f,
7598         222.0f, 217.0f,  75.0f,
7599          32.0f, 126.0f,  21.0f,
7600
7601         // Batch 1, Channel 2
7602          97.0f, 145.0f, 215.0f,
7603         115.0f, 116.0f, 238.0f,
7604         226.0f,  16.0f, 132.0f,
7605          92.0f, 125.0f,  88.0f,
7606     })));
7607
7608     LayerTestResult<T, 4> result(outputTensorInfo);
7609     result.outputExpected = input;
7610
7611     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7612
7613     armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
7614     AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
7615
7616     armnn::ConstantQueueDescriptor descriptor;
7617     descriptor.m_LayerOutput = &constantTensor;
7618
7619     armnn::WorkloadInfo info;
7620     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7621
7622     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
7623
7624     outputHandle->Allocate();
7625
7626     workload->PostAllocationConfigure();
7627     workload->Execute();
7628
7629     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7630     return result;
7631 }
7632
7633 LayerTestResult<float, 4> ConstantTest(
7634     armnn::IWorkloadFactory& workloadFactory,
7635     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7636 {
7637     return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
7638 }
7639
7640 LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7641     armnn::IWorkloadFactory& workloadFactory,
7642     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7643 {
7644     return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7645 }
7646
7647 LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
7648     armnn::IWorkloadFactory& workloadFactory,
7649     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7650 {
7651     return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
7652 }
7653
7654 LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
7655         armnn::IWorkloadFactory& workloadFactory,
7656         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7657 {
7658     unsigned int outputWidth = 3;
7659     unsigned int outputHeight = 6;
7660     unsigned int outputChannels = 3;
7661
7662     unsigned int inputWidth1 = 3;
7663     unsigned int inputHeight1 = 6;
7664     unsigned int inputChannels1 = 2;
7665
7666     unsigned int inputWidth2 = 3;
7667     unsigned int inputHeight2 = 6;
7668     unsigned int inputChannels2 = 1;
7669
7670     // Defines the tensor descriptors.
7671     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7672     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7673     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7674
7675     // Quantized input1 tensor. Range [-3, 1]
7676     const float inputScale1 = 0.015686f;
7677     const int32_t inputOffset1 = 192;
7678
7679     auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7680     {
7681         1, 2, 3,
7682         4, 5, 6,
7683         7, 8, 9,
7684         10, 11, 12,
7685         13, 14, 15,
7686         16, 17, 18,
7687
7688         19, 20, 21,
7689         22, 23, 24,
7690         25, 26, 27,
7691         28, 29, 30,
7692         31, 32, 33,
7693         34, 35, 36,
7694     })
7695     );
7696
7697     // Quantized input2 tensor. Range [-1, 4]
7698     const float inputScale2 = 0.019608f;
7699     const int32_t inputOffset2 = 50;
7700
7701     auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7702     {
7703         37, 38, 39,
7704         40, 41, 42,
7705         43, 44, 45,
7706         46, 47, 48,
7707         49, 50, 51,
7708         52, 53, 54,
7709     })
7710     );
7711
7712     // Output has the same quantization parameters as input1,
7713     // so that only the requantization of input2 is required.
7714     const float outputScale = 0.015686f;
7715     const int32_t outputOffset = 192;
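         // e.g. the first input2 value, 37, dequantizes to (37 - 50) * 0.019608 ~= -0.2549,
         // which requantizes to round(-0.2549 / 0.015686) + 192 = 176, the first value of
         // the requantized block in the expected output below.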
7716
7717     LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7718
7719     ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7720     {
7721         1, 2, 3,
7722         4, 5, 6,
7723         7, 8, 9,
7724         10, 11, 12,
7725         13, 14, 15,
7726         16, 17, 18,
7727
7728         19, 20, 21,
7729         22, 23, 24,
7730         25, 26, 27,
7731         28, 29, 30,
7732         31, 32, 33,
7733         34, 35, 36,
7734
7735         176, 177, 178,
7736         179, 181, 182,
7737         183, 184, 186,
7738         187, 188, 189,
7739         191, 192, 193,
7740         195, 196, 197,
7741     })
7742     );
7743
7744     outputTensorInfo.SetQuantizationScale(outputScale);
7745     outputTensorInfo.SetQuantizationOffset(outputOffset);
7746     inputTensorInfo1.SetQuantizationScale(inputScale1);
7747     inputTensorInfo1.SetQuantizationOffset(inputOffset1);
7748     inputTensorInfo2.SetQuantizationScale(inputScale2);
7749     inputTensorInfo2.SetQuantizationOffset(inputOffset2);
7750
7751     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
7752     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
7753
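         // The { 2, 0, 0 } origin below places input2's view at channel offset 2 of the
         // output, directly after the two channels occupied by input1.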
7754     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
7755     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
7756
7757     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7758
7759     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7760
7761     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7762             subTensorsSupported ?
7763             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7764             workloadFactory.CreateTensorHandle(inputTensorInfo1);
7765
7766     std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7767             subTensorsSupported ?
7768             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7769             workloadFactory.CreateTensorHandle(inputTensorInfo2);
7770
7771     armnn::ConcatQueueDescriptor data;
7772     armnn::WorkloadInfo info;
7773     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7774     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7775     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7776
7777     data.m_ViewOrigins.push_back(window1);
7778     data.m_ViewOrigins.push_back(window2);
7779
7780     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
7781
7782     inputHandle1->Allocate();
7783     inputHandle2->Allocate();
7784     outputHandle->Allocate();
7785
7786     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7787     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7788
7789     workload->PostAllocationConfigure();
7790     workload->Execute();
7791
7792     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7793
7794     return ret;
7795 }
7796
7797 LayerTestResult<uint8_t, 3> ConcatUint8Test(
7798     armnn::IWorkloadFactory& workloadFactory,
7799     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7800 {
7801     unsigned int outputWidth = 3;
7802     unsigned int outputHeight = 6;
7803     unsigned int outputChannels = 3;
7804
7805     unsigned int inputWidth1 = 3;
7806     unsigned int inputHeight1 = 6;
7807     unsigned int inputChannels1 = 2;
7808
7809     unsigned int inputWidth2 = 3;
7810     unsigned int inputHeight2 = 6;
7811     unsigned int inputChannels2 = 1;
7812
7813     // Defines the tensor descriptors.
7814     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7815     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7816     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7817
7818     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
7819     const float scale = 0.13497836f;
7820     const int32_t offset = -7;
7821
7822     outputTensorInfo.SetQuantizationScale(scale);
7823     outputTensorInfo.SetQuantizationOffset(offset);
7824     inputTensorInfo1.SetQuantizationScale(scale);
7825     inputTensorInfo1.SetQuantizationOffset(offset);
7826     inputTensorInfo2.SetQuantizationScale(scale);
7827     inputTensorInfo2.SetQuantizationOffset(offset);
7828
7829     LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7830
7831     ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7832         {
7833             1, 2, 3,
7834             4, 5, 6,
7835             7, 8, 9,
7836             10, 11, 12,
7837             13, 14, 15,
7838             16, 17, 18,
7839
7840             19, 20, 21,
7841             22, 23, 24,
7842             25, 26, 27,
7843             28, 29, 30,
7844             31, 32, 33,
7845             34, 35, 36,
7846
7847             37, 38, 39,
7848             40, 41, 42,
7849             43, 44, 45,
7850             46, 47, 48,
7851             49, 50, 51,
7852             52, 53, 54,
7853         })
7854     );
7855
7856     auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7857     {
7858         1, 2, 3,
7859         4, 5, 6,
7860         7, 8, 9,
7861         10, 11, 12,
7862         13, 14, 15,
7863         16, 17, 18,
7864
7865         19, 20, 21,
7866         22, 23, 24,
7867         25, 26, 27,
7868         28, 29, 30,
7869         31, 32, 33,
7870         34, 35, 36,
7871     })
7872     );
7873
7874     auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7875     {
7876         37, 38, 39,
7877         40, 41, 42,
7878         43, 44, 45,
7879         46, 47, 48,
7880         49, 50, 51,
7881         52, 53, 54,
7882     })
7883     );
7884
7885     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
7886     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
7887
7888     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
7889     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
7890
7891
7892     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7893
7894     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7895
7896     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7897         subTensorsSupported ?
7898             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7899             workloadFactory.CreateTensorHandle(inputTensorInfo1);
7900
7901     std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7902         subTensorsSupported ?
7903             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7904             workloadFactory.CreateTensorHandle(inputTensorInfo2);
7905
7906
7907     armnn::ConcatQueueDescriptor data;
7908     armnn::WorkloadInfo info;
7909     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7910     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7911     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7912
7913     data.m_ViewOrigins.push_back(window1);
7914     data.m_ViewOrigins.push_back(window2);
7915
7916     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
7917
7918     inputHandle1->Allocate();
7919     inputHandle2->Allocate();
7920     outputHandle->Allocate();
7921
7922     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7923     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7924
7925     workload->PostAllocationConfigure();
7926     workload->Execute();
7927
7928     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7929
7930     return ret;
7931 }
7932
7933 LayerTestResult<uint16_t, 3> ConcatUint16Test(
7934         armnn::IWorkloadFactory& workloadFactory,
7935         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7936 {
7937     unsigned int outputWidth = 3;
7938     unsigned int outputHeight = 6;
7939     unsigned int outputChannels = 3;
7940
7941     unsigned int inputWidth1 = 3;
7942     unsigned int inputHeight1 = 6;
7943     unsigned int inputChannels1 = 2;
7944
7945     unsigned int inputWidth2 = 3;
7946     unsigned int inputHeight2 = 6;
7947     unsigned int inputChannels2 = 1;
7948
7949     // Defines the tensor descriptors.
7950     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
7951     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
7952     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
7953
7954     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
7955     const float scale = 0.13497836f;
7956     const int32_t offset = -7;
7957
7958     outputTensorInfo.SetQuantizationScale(scale);
7959     outputTensorInfo.SetQuantizationOffset(offset);
7960     inputTensorInfo1.SetQuantizationScale(scale);
7961     inputTensorInfo1.SetQuantizationOffset(offset);
7962     inputTensorInfo2.SetQuantizationScale(scale);
7963     inputTensorInfo2.SetQuantizationOffset(offset);
7964
7965     LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
7966
7967     ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
7968     {
7969         1, 2, 3,
7970         4, 5, 6,
7971         7, 8, 9,
7972         10, 11, 12,
7973         13, 14, 15,
7974         16, 17, 18,
7975
7976         19, 20, 21,
7977         22, 23, 24,
7978         25, 26, 27,
7979         28, 29, 30,
7980         31, 32, 33,
7981         34, 35, 36,
7982
7983         37, 38, 39,
7984         40, 41, 42,
7985         43, 44, 45,
7986         46, 47, 48,
7987         49, 50, 51,
7988         52, 53, 54,
7989     }));
7990
7991     auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
7992     {
7993         1, 2, 3,
7994         4, 5, 6,
7995         7, 8, 9,
7996         10, 11, 12,
7997         13, 14, 15,
7998         16, 17, 18,
7999
8000         19, 20, 21,
8001         22, 23, 24,
8002         25, 26, 27,
8003         28, 29, 30,
8004         31, 32, 33,
8005         34, 35, 36,
8006     }));
8007
8008     auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
8009     {
8010         37, 38, 39,
8011         40, 41, 42,
8012         43, 44, 45,
8013         46, 47, 48,
8014         49, 50, 51,
8015         52, 53, 54,
8016     }));
8017
8018     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
8019     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
8020
8021     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
8022     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
8023
8024
8025     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8026
8027     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
8028
8029     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
8030             subTensorsSupported ?
8031             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
8032             workloadFactory.CreateTensorHandle(inputTensorInfo1);
8033
8034     std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
8035             subTensorsSupported ?
8036             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
8037             workloadFactory.CreateTensorHandle(inputTensorInfo2);
8038
8039
8040     armnn::ConcatQueueDescriptor data;
8041     armnn::WorkloadInfo info;
8042     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8043     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
8044     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8045
8046     data.m_ViewOrigins.push_back(window1);
8047     data.m_ViewOrigins.push_back(window2);
8048
8049     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
8050
8051     inputHandle1->Allocate();
8052     inputHandle2->Allocate();
8053     outputHandle->Allocate();
8054
8055     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
8056     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
8057
8058     workload->PostAllocationConfigure();
8059     workload->Execute();
8060
8061     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
8062
8063     return ret;
8064 }
8065
8066 namespace
8067 {
8068 template <typename T>
8069 LayerTestResult<T, 4> AdditionQuantizeTestHelper(
8070     armnn::IWorkloadFactory& workloadFactory,
8071     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8072     const unsigned int shape0[4],
8073     const std::vector<T>& values0,
8074     float scale0,
8075     int32_t offset0,
8076     const unsigned int shape1[4],
8077     const std::vector<T> & values1,
8078     float scale1,
8079     int32_t offset1,
8080     const unsigned int outShape[4],
8081     const std::vector<T> & outValues,
8082     float outScale,
8083     int32_t outOffset)
8084 {
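         // Quantized addition is verified against: real = scale * (q - offset) per input,
         // then q_out = clamp(round((real0 + real1) / outScale) + outOffset).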
8085     auto dataType = (std::is_same<T, uint8_t>::value ?
8086                      armnn::DataType::QuantisedAsymm8 :
8087                      armnn::DataType::QuantisedSymm16);
8088
8089     armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
8090     armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
8091     armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
8092
8093     inputTensorInfo0.SetQuantizationScale(scale0);
8094     inputTensorInfo0.SetQuantizationOffset(offset0);
8095
8096     inputTensorInfo1.SetQuantizationScale(scale1);
8097     inputTensorInfo1.SetQuantizationOffset(offset1);
8098
8099     outputTensorInfo.SetQuantizationScale(outScale);
8100     outputTensorInfo.SetQuantizationOffset(outOffset);
8101
8102     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8103     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8104
8105     LayerTestResult<T, 4> result(outputTensorInfo);
8106     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8107
8108     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8109     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8110     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8111
8112     armnn::AdditionQueueDescriptor data;
8113     armnn::WorkloadInfo info;
8114     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
8115     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
8116     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8117
8118     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
8119
8120     inputHandle0->Allocate();
8121     inputHandle1->Allocate();
8122     outputHandle->Allocate();
8123
8124     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8125     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8126
8127     workload->PostAllocationConfigure();
8128     workload->Execute();
8129
8130     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8131
8132     return result;
8133 }
8134 } // anonymous namespace
8135
8136 LayerTestResult<uint8_t, 4> AdditionUint8Test(
8137     armnn::IWorkloadFactory& workloadFactory,
8138     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8139 {
8140     const unsigned int shape0[] = { 1, 2, 2, 3 };
8141     const unsigned int shape1[] = { 1, 2, 2, 3 };
8142
8143     std::vector<uint8_t> input0(
8144     {
8145         63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
8146         203,  28, 252, 168, 245,  91  // 1400, 175, 1743, 1155, 1694, 616
8147     });
8148
8149     std::vector<uint8_t> input1(
8150     {
8151         21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
8152         126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
8153     });
8154
8155     std::vector<uint8_t> output(
8156     {
8157         81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
8158         255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
8159     });
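         // Worked example for the first element: (63 - 3) * 7 + (21 - 3) * 7 = 420 + 126 = 546,
         // and 546 / 7 + 3 = 81.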
8160
8161     return AdditionQuantizeTestHelper(workloadFactory,
8162                                       memoryManager,
8163                                       shape0, input0, 7.0f, 3,
8164                                       shape1, input1, 7.0f, 3,
8165                                       shape0, output, 7.0f, 3);
8166 }
8167
8168 LayerTestResult<int16_t, 4> AdditionInt16Test(
8169     armnn::IWorkloadFactory& workloadFactory,
8170     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8171 {
8172     const unsigned int shape0[] = { 1, 2, 2, 3 };
8173     const unsigned int shape1[] = { 1, 2, 2, 3 };
8174
8175     std::vector<int16_t> input0(
8176         {
8177             63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392, 784
8178             203,  28, 252, 168, 245,  91  // 1421, 196, 1764, 1176, 1715, 637
8179         });
8180
8181     std::vector<int16_t> input1(
8182         {
8183             21,   7, 175, 231, 175, 210, //  147,   49, 1225, 1617, 1225, 1470
8184             126, 161,  63,  21, 105, 126  //  882, 1127,  441,  147,  735,  882
8185         });
8186
8187     std::vector<int16_t> output(
8188         {
8189             84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
8190             329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519 (no clamping in the int16 range)
8191         });
8192
8193     return AdditionQuantizeTestHelper(workloadFactory,
8194                                       memoryManager,
8195                                       shape0, input0, 7.0f, 0,
8196                                       shape1, input1, 7.0f, 0,
8197                                       shape0, output, 7.0f, 0);
8198 }
8199
8200 namespace
8201 {
8202 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
8203 LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
8204     armnn::IWorkloadFactory& workloadFactory,
8205     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8206     const unsigned int shape0[4],
8207     const std::vector<T> & values0,
8208     float scale0,
8209     int32_t offset0,
8210     const unsigned int shape1[4],
8211     const std::vector<T> & values1,
8212     float scale1,
8213     int32_t offset1,
8214     const unsigned int outShape[4],
8215     const std::vector<T> & outValues,
8216     float outScale,
8217     int32_t outOffset)
8218 {
8219     armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
8220     armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
8221     armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
8222
8223     inputTensorInfo0.SetQuantizationScale(scale0);
8224     inputTensorInfo0.SetQuantizationOffset(offset0);
8225
8226     inputTensorInfo1.SetQuantizationScale(scale1);
8227     inputTensorInfo1.SetQuantizationOffset(offset1);
8228
8229     outputTensorInfo.SetQuantizationScale(outScale);
8230     outputTensorInfo.SetQuantizationOffset(outOffset);
8231
8232     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8233     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8234
8235     LayerTestResult<T, 4> result(outputTensorInfo);
8236     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8237
8238     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8239     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8240     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8241
8242     armnn::MultiplicationQueueDescriptor data;
8243     armnn::WorkloadInfo info;
8244     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
8245     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
8246     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8247
8248     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
8249
8250     inputHandle0->Allocate();
8251     inputHandle1->Allocate();
8252     outputHandle->Allocate();
8253
8254     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8255     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8256
8257     workload->PostAllocationConfigure();
8258     workload->Execute();
8259
8260     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8261
8262     return result;
8263 }
8264 } // anonymous namespace
8265
8266 LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
8267     armnn::IWorkloadFactory& workloadFactory,
8268     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8269 {
8270     unsigned int batchSize = 1;
8271     unsigned int channels = 2;
8272     unsigned int height = 2;
8273     unsigned int width = 3;
8274     const unsigned int shape[] = { batchSize, channels, height, width };
8275
8276     // See dequantized values to the right.
8277     std::vector<uint8_t> input0({
8278          62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
8279         188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
8280     });
8281
8282     // See dequantized values to the right.
8283     std::vector<uint8_t> input1({
8284         126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
8285          48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
8286     });
8287
8288     // See dequantized values to the right.
8289     std::vector<uint8_t> output(
8290     {
8291          64,  72,   0, 255,   8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
8292          77,  15,  92,  16,  10,  21, // 112200,  26676,        132192,           29160, 21120,  35640
8293     });
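         // Worked example for the first element: 244 * 384 = 93696,
         // and round(93696 / 1366.255) - 5 = 69 - 5 = 64.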
8294
8295     // Scale/offset chosen to have output values out of range.
8296     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8297                                                                               memoryManager,
8298                                                                               shape,
8299                                                                               input0,
8300                                                                               4.0f,
8301                                                                               1,
8302                                                                               shape,
8303                                                                               input1,
8304                                                                               3.0f,
8305                                                                               -2,
8306                                                                               shape,
8307                                                                               output,
8308                                                                               1366.255f,
8309                                                                               -5);
8310 }
8311
8312 LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
8313     armnn::IWorkloadFactory& workloadFactory,
8314     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8315 {
8316     const unsigned int shape0[] = { 1, 2, 2, 3 };
8317     const unsigned int shape1[] = { 1, 1, 1, 1 };
8318
8319     std::vector<uint8_t> input0({
8320         1, 2, 3,    4,  5,  6,
8321         7, 8, 9,   10, 11, 12
8322     });
8323
8324     std::vector<uint8_t> input1({2});
8325
8326     std::vector<uint8_t> output({
8327         2,  4,   6,     8, 10, 12,
8328         14, 16, 18,    20, 22, 24
8329     });
8330
8331     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8332                                                                               memoryManager,
8333                                                                               shape0,
8334                                                                               input0,
8335                                                                               1.0f,
8336                                                                               0,
8337                                                                               shape1,
8338                                                                               input1,
8339                                                                               1.0f,
8340                                                                               0,
8341                                                                               shape0,
8342                                                                               output,
8343                                                                               1.0f,
8344                                                                               0);
8345 }
8346
8347 LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
8348     armnn::IWorkloadFactory& workloadFactory,
8349     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8350 {
8351     const unsigned int shape0[] = { 1, 2, 2, 3 };
8352     const unsigned int shape1[] = { 1, 1, 1, 3 };
8353
8354     std::vector<uint8_t> input0({
8355         1, 2, 3,    4,  5,  6,
8356         7, 8, 9,   10, 11, 12
8357     });
8358
8359     std::vector<uint8_t> input1({1, 2, 3});
8360
8361     std::vector<uint8_t> output({
8362         1,  4,   9,     4, 10, 18,
8363         7, 16,  27,    10, 22, 36
8364     });
8365
8366     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8367                                                                               memoryManager,
8368                                                                               shape0,
8369                                                                               input0,
8370                                                                               1.0f,
8371                                                                               0,
8372                                                                               shape1,
8373                                                                               input1,
8374                                                                               1.0f,
8375                                                                               0,
8376                                                                               shape0,
8377                                                                               output,
8378                                                                               1.0f,
8379                                                                               0);
8380 }
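
// Illustrative sketch (not part of the original tests): in the broadcast tests above, a
// {1, 1, 1, 3} operand is repeated along the innermost dimension of the {1, 2, 2, 3}
// operand, so in flattened form output[i] = input0[i] * input1[i % 3]; e.g.
// {1, 2, 3, 4, 5, 6} * {1, 2, 3} -> {1, 4, 9, 4, 10, 18}, matching 'output' above.
static std::vector<uint8_t> BroadcastMulSketch(const std::vector<uint8_t>& tensor,
                                               const std::vector<uint8_t>& vec)
{
    std::vector<uint8_t> result(tensor.size());
    for (size_t i = 0; i < tensor.size(); ++i)
    {
        result[i] = static_cast<uint8_t>(tensor[i] * vec[i % vec.size()]);
    }
    return result;
}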
8381
8382 LayerTestResult<int16_t, 4> MultiplicationInt16Test(
8383     armnn::IWorkloadFactory& workloadFactory,
8384     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8385 {
8386     const unsigned int shape[] = { 1, 2, 2, 3 };
8387
8388     std::vector<int16_t> input0(
8389     {
8390         6,   7,  8,  9, 10, 11,
8391         12, 13, 14, 15, 16, 17
8392     });
8393
8394     std::vector<int16_t> input1(
8395     {
8396         1, 2, 3,  4,  5,  6,
8397         7, 8, 9, 10, 11, 12
8398     });
8399
8400     std::vector<int16_t> output(
8401     {
8402         6,   14,  24,  36,  50,  66,
8403         84, 104, 126, 150, 176, 204
8404     });
8405
8406     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8407                                                                               memoryManager,
8408                                                                               shape,
8409                                                                               input0,
8410                                                                               1.0f,
8411                                                                               0,
8412                                                                               shape,
8413                                                                               input1,
8414                                                                               1.0f,
8415                                                                               0,
8416                                                                               shape,
8417                                                                               output,
8418                                                                               1.0f,
8419                                                                               0);
8420 }
8421
8422 LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
8423     armnn::IWorkloadFactory& workloadFactory,
8424     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8425 {
8426     const unsigned int shape0[] = { 1, 2, 2, 3 };
8427     const unsigned int shape1[] = { 1, 1, 1, 1 };
8428
8429     std::vector<int16_t> input0(
8430     {
8431         1, 2, 3,  4,  5,  6,
8432         7, 8, 9, 10, 11, 12
8433     });
8434
8435     std::vector<int16_t> input1({2});
8436
8437     std::vector<int16_t> output(
8438     {
8439         2,   4,  6,  8, 10, 12,
8440         14, 16, 18, 20, 22, 24
8441     });
8442
8443     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8444                                                                               memoryManager,
8445                                                                               shape0,
8446                                                                               input0,
8447                                                                               1.0f,
8448                                                                               0,
8449                                                                               shape1,
8450                                                                               input1,
8451                                                                               1.0f,
8452                                                                               0,
8453                                                                               shape0,
8454                                                                               output,
8455                                                                               1.0f,
8456                                                                               0);
8457 }
8458
8459 LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
8460     armnn::IWorkloadFactory& workloadFactory,
8461     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8462 {
8463     const unsigned int shape0[] = { 1, 2, 2, 3 };
8464     const unsigned int shape1[] = { 1, 1, 1, 3 };
8465
8466     std::vector<int16_t> input0(
8467     {
8468         1, 2, 3,  4,  5,  6,
8469         7, 8, 9, 10, 11, 12
8470     });
8471
8472     std::vector<int16_t> input1({1, 2, 3});
8473
8474     std::vector<int16_t> output(
8475     {
8476         1,  4,  9,  4, 10, 18,
8477         7, 16, 27, 10, 22, 36
8478     });
8479
8480     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8481                                                                               memoryManager,
8482                                                                               shape0,
8483                                                                               input0,
8484                                                                               1.0f,
8485                                                                               0,
8486                                                                               shape1,
8487                                                                               input1,
8488                                                                               1.0f,
8489                                                                               0,
8490                                                                               shape0,
8491                                                                               output,
8492                                                                               1.0f,
8493                                                                               0);
8494 }
8495
8496 namespace
8497 {
8498 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
8499 LayerTestResult<T, 4> SubtractionTestHelper(
8500     armnn::IWorkloadFactory& workloadFactory,
8501     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8502     const unsigned int shape0[4],
8503     const std::vector<T>& values0,
8504     float scale0,
8505     int32_t offset0,
8506     const unsigned int shape1[4],
8507     const std::vector<T>& values1,
8508     float scale1,
8509     int32_t offset1,
8510     const unsigned int outShape[4],
8511     const std::vector<T>& outValues,
8512     float outScale,
8513     int32_t outOffset)
8514 {
8515     armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
8516     armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
8517     armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
8518
8519     inputTensorInfo0.SetQuantizationScale(scale0);
8520     inputTensorInfo0.SetQuantizationOffset(offset0);
8521
8522     inputTensorInfo1.SetQuantizationScale(scale1);
8523     inputTensorInfo1.SetQuantizationOffset(offset1);
8524
8525     outputTensorInfo.SetQuantizationScale(outScale);
8526     outputTensorInfo.SetQuantizationOffset(outOffset);
8527
8528     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8529     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8530
8531     LayerTestResult<T, 4> result(outputTensorInfo);
8532     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8533
8534     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8535     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8536     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8537
8538     armnn::SubtractionQueueDescriptor data;
8539     armnn::WorkloadInfo info;
8540     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
8541     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
8542     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8543
8544     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
8545
8546     inputHandle0->Allocate();
8547     inputHandle1->Allocate();
8548     outputHandle->Allocate();
8549
8550     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8551     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8552
8553     workload->PostAllocationConfigure();
8554     workload->Execute();
8555
8556     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8557
8558     return result;
8559 }
8560 } // anonymous namespace
8561
8562 LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8563     armnn::IWorkloadFactory& workloadFactory,
8564     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8565 {
8566     const unsigned int shape0[] = { 1, 1, 2, 2 };
8567     const unsigned int shape1[] = { 1, 1, 2, 2 };
8568
8569     std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8570     std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8571     std::vector<uint8_t> output({ 3, 3, 5, 5 });
8572
8573     return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8574                                                                    memoryManager,
8575                                                                    shape0, input0, 0.5f, 2,
8576                                                                    shape1, input1, 1.0f, 0,
8577                                                                    shape0, output, 1.0f, 0);
8578 }
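
// Illustrative sketch (not part of the original tests): the expected values above follow
// from dequantize -> subtract -> requantize. With scale0 = 0.5 and offset0 = 2, input0
// {10, 12, 14, 16} dequantizes to {4, 5, 6, 7}; subtracting {1, 2, 1, 2} gives
// {3, 3, 5, 5}, which the identity output quantization (scale 1, offset 0) stores as-is.
static float DequantizeSketch(uint8_t q, float scale, int32_t offset)
{
    return static_cast<float>(static_cast<int32_t>(q) - offset) * scale;
}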
8579
8580 LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8581     armnn::IWorkloadFactory& workloadFactory,
8582     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8583 {
8584     const unsigned int shape0[] = { 1, 1, 2, 2 };
8585     const unsigned int shape1[] = { 1, 1, 1, 1 };
8586
8587     std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8588     std::vector<uint8_t> input1({ 2 });
8589     std::vector<uint8_t> output({ 5, 6, 7, 8 });
8590
8591     return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8592                                                                    memoryManager,
8593                                                                    shape0, input0, 0.5f, 2,
8594                                                                    shape1, input1, 1.0f, 0,
8595                                                                    shape0, output, 1.0f, 3);
8596 }
8597
8598 LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
8599     armnn::IWorkloadFactory& workloadFactory,
8600     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8601 {
8602     const unsigned int shape0[] = { 1, 1, 2, 2 };
8603     const unsigned int shape1[] = { 1, 1, 2, 1 };
8604
8605     std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8606     std::vector<uint8_t> input1({ 2, 1 });
8607     std::vector<uint8_t> output({ 8, 11, 12, 15 });
8608
8609     return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8610                                                                    memoryManager,
8611                                                                    shape0, input0, 1.0f, 0,
8612                                                                    shape1, input1, 1.0f, 0,
8613                                                                    shape0, output, 1.0f, 0);
8614 }
8615
8616 LayerTestResult<float, 4> SubtractionTest(
8617     armnn::IWorkloadFactory& workloadFactory,
8618     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8619 {
8620     const unsigned int shape0[] = { 1, 1, 2, 2 };
8621     const unsigned int shape1[] = { 1, 1, 2, 2 };
8622
8623     std::vector<float> input0({ 1,  2, 3, 4 });
8624     std::vector<float> input1({ 1, -1, 0, 2 });
8625     std::vector<float> output({ 0,  3, 3, 2 });
8626
8627     return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8628                                                            memoryManager,
8629                                                            shape0, input0, 1.0f, 0,
8630                                                            shape1, input1, 1.0f, 0,
8631                                                            shape0, output, 1.0f, 0);
8632 }
8633
8634 LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8635     armnn::IWorkloadFactory& workloadFactory,
8636     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8637 {
8638     const unsigned int shape0[] = { 1, 1, 2, 2 };
8639     const unsigned int shape1[] = { 1, 1, 1, 1 };
8640
8641     std::vector<float> input0({ 1,  2, 3, 4 });
8642     std::vector<float> input1({ 10 });
8643     std::vector<float> output({ -9,  -8, -7, -6 });
8644
8645     return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8646                                                            memoryManager,
8647                                                            shape0, input0, 1.0f, 0,
8648                                                            shape1, input1, 1.0f, 0,
8649                                                            shape0, output, 1.0f, 0);
8650 }
8651
8652 LayerTestResult<float, 4> SubtractionBroadcastTest(
8653     armnn::IWorkloadFactory& workloadFactory,
8654     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8655 {
8656     const unsigned int shape0[] = { 1, 1, 2, 2 };
8657     const unsigned int shape1[] = { 1, 1, 1, 2 };
8658
8659     std::vector<float> input0({ 1,  2, 3, 4 });
8660     std::vector<float> input1({ 10, -5 });
8661     std::vector<float> output({ -9,  7, -7, 9 });
8662
8663     return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8664                                                            memoryManager,
8665                                                            shape0, input0, 1.0f, 0,
8666                                                            shape1, input1, 1.0f, 0,
8667                                                            shape0, output, 1.0f, 0);
8668 }
8669
8670 LayerTestResult<int16_t, 4> SubtractionInt16Test(
8671     armnn::IWorkloadFactory& workloadFactory,
8672     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8673 {
8674     const unsigned int shape0[] = { 1, 1, 2, 2 };
8675     const unsigned int shape1[] = { 1, 1, 2, 2 };
8676
8677     std::vector<int16_t> input0({ 10, 12, 14, 16 });
8678     std::vector<int16_t> input1({ 1, 2, 1, 2 });
8679     std::vector<int16_t> output({ 3, 3, 5, 5 });
8680
8681     return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8682                                                                    memoryManager,
8683                                                                    shape0, input0, 0.5f, 0,
8684                                                                    shape1, input1, 1.0f, 0,
8685                                                                    shape0, output, 1.0f, 0);
8686 }
8687
8688 LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8689     armnn::IWorkloadFactory& workloadFactory,
8690     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8691 {
8692     const unsigned int shape0[] = { 1, 1, 2, 2 };
8693     const unsigned int shape1[] = { 1, 1, 1, 1 };
8694
8695     std::vector<int16_t> input0({ 10, 12, 14, 16 });
8696     std::vector<int16_t> input1({ 2 });
8697     std::vector<int16_t> output({ 3, 4, 5, 6 });
8698
8699     return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8700                                                                    memoryManager,
8701                                                                    shape0, input0, 0.5f, 0,
8702                                                                    shape1, input1, 1.0f, 0,
8703                                                                    shape0, output, 1.0f, 0);
8704 }
8705
8706 LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8707     armnn::IWorkloadFactory& workloadFactory,
8708     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8709 {
8710     const unsigned int shape0[] = { 1, 1, 2, 2 };
8711     const unsigned int shape1[] = { 1, 1, 2, 1 };
8712
8713     std::vector<int16_t> input0({ 10, 12, 14, 16 });
8714     std::vector<int16_t> input1({ 2, 1 });
8715     std::vector<int16_t> output({ 8, 11, 12, 15 });
8716
8717     return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8718                                                                    memoryManager,
8719                                                                    shape0, input0, 1.0f, 0,
8720                                                                    shape1, input1, 1.0f, 0,
8721                                                                    shape0, output, 1.0f, 0);
8722 }
8723
8724 LayerTestResult<float, 4> BatchNormTest(
8725     armnn::IWorkloadFactory& workloadFactory,
8726     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8727 {
8728     // BatchSize: 1
8729     // Channels: 2
8730     // Height: 3
8731     // Width: 2
8732
8733     const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8734     std::vector<float> inputValues
8735     {
8736         // Batch 0, Channel 0, Height (3) x Width (2)
8737          1.f, 4.f,
8738          4.f, 2.f,
8739          1.f, 6.f,
8740
8741         // Batch 0, Channel 1, Height (3) x Width (2)
8742          1.f, 1.f,
8743          4.f, 1.f,
8744         -2.f, 4.f
8745     };
8746     std::vector<float> expectedOutputValues
8747     {
8748         // Batch 0, Channel 0, Height (3) x Width (2)
8749         1.f, 4.f,
8750         4.f, 2.f,
8751         1.f, 6.f,
8752
8753         // Batch 0, Channel 1, Height (3) x Width (2)
8754         3.f, 3.f,
8755         4.f, 3.f,
8756         2.f, 4.f
8757     };
8758
8759     return BatchNormTestImpl<armnn::DataType::Float32>(
8760         workloadFactory, memoryManager,
8761         inputOutputShape, inputValues, expectedOutputValues,
8762         0.f, 0, armnn::DataLayout::NCHW);
8763 }
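
// Illustrative sketch (not part of the original tests; assumes <cmath>): per channel,
// batch normalization computes out = gamma * (x - mean) / sqrt(variance + epsilon) + beta.
// The per-channel mean/variance/beta/gamma constants are supplied inside BatchNormTestImpl
// and are not visible here; from the expected values one can read off that channel 0
// passes through unchanged while channel 1 is remapped affinely
// (1 -> 3, 4 -> 4, -2 -> 2, i.e. out = x / 3 + 8 / 3).
static float BatchNormSketch(float x, float mean, float variance,
                             float beta, float gamma, float epsilon)
{
    return gamma * (x - mean) / std::sqrt(variance + epsilon) + beta;
}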
8764
8765 LayerTestResult<float, 4> BatchNormNhwcTest(
8766     armnn::IWorkloadFactory& workloadFactory,
8767     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8768 {
8769     // BatchSize: 1
8770     // Height: 3
8771     // Width: 2
8772     // Channels: 2
8773
8774     const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8775     std::vector<float> inputValues
8776     {
8777         // Batch 0, Height 0, Width (2) x Channel (2)
8778         1.f,  1.f,
8779         4.f,  1.f,
8780
8781         // Batch 0, Height 1, Width (2) x Channel (2)
8782         4.f,  4.f,
8783         2.f,  1.f,
8784
8785         // Batch 0, Height 2, Width (2) x Channel (2)
8786         1.f, -2.f,
8787         6.f,  4.f
8788     };
8789     std::vector<float> expectedOutputValues
8790     {
8791         // Batch 0, Height 0, Width (2) x Channel (2)
8792         1.f, 3.f,
8793         4.f, 3.f,
8794
8795         // Batch 0, Height 1, Width (2) x Channel (2)
8796         4.f, 4.f,
8797         2.f, 3.f,
8798
8799         // Batch 0, Height 2, Width (2) x Channel (2)
8800         1.f, 2.f,
8801         6.f, 4.f
8802     };
8803
8804     return BatchNormTestImpl<armnn::DataType::Float32>(
8805         workloadFactory, memoryManager,
8806         inputOutputShape, inputValues, expectedOutputValues,
8807         0.f, 0, armnn::DataLayout::NHWC);
8808 }
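
// Illustrative sketch (not part of the original tests): the NHWC test above holds the same
// logical values as the NCHW test, just stored interleaved by channel. The flat-index
// mappings are:
static size_t NchwIndexSketch(size_t n, size_t c, size_t h, size_t w,
                              size_t C, size_t H, size_t W)
{
    return ((n * C + c) * H + h) * W + w;
}
static size_t NhwcIndexSketch(size_t n, size_t c, size_t h, size_t w,
                              size_t C, size_t H, size_t W)
{
    return ((n * H + h) * W + w) * C + c;
}
// e.g. the NCHW value at (0, 1, 0, 0) (channel 1, top-left, value 1.f) lands at flat
// index 1 of the NHWC data above.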
8809
8810 LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8811     armnn::IWorkloadFactory& workloadFactory,
8812     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8813 {
8814     // BatchSize: 1
8815     // Channels: 2
8816     // Height: 3
8817     // Width: 2
8818
8819     const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8820     std::vector<float> inputValues
8821     {
8822         // Batch 0, Channel 0, Height (3) x Width (2)
8823          1.f, 4.f,
8824          4.f, 2.f,
8825          1.f, 6.f,
8826
8827         // Batch 0, Channel 1, Height (3) x Width (2)
8828          1.f, 1.f,
8829          4.f, 1.f,
8830         -2.f, 4.f
8831     };
8832     std::vector<float> expectedOutputValues
8833     {
8834         // Batch 0, Channel 0, Height (3) x Width (2)
8835         1.f, 4.f,
8836         4.f, 2.f,
8837         1.f, 6.f,
8838
8839         // Batch 0, Channel 1, Height (3) x Width (2)
8840         3.f, 3.f,
8841         4.f, 3.f,
8842         2.f, 4.f
8843     };
8844
8845     return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8846         workloadFactory, memoryManager,
8847         inputOutputShape, inputValues, expectedOutputValues,
8848         1.f/20.f, 50, armnn::DataLayout::NCHW);
8849 }
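
// Illustrative sketch (not part of the original tests; assumes <utility>): with scale 1/20
// and offset 50, the uint8 variant can represent
// [(0 - 50) * 0.05, (255 - 50) * 0.05] = [-2.5, 10.25], which comfortably covers the
// float inputs above (-2.f to 6.f).
static std::pair<float, float> Uint8RangeSketch(float scale, int32_t offset)
{
    return { (0 - offset) * scale, (255 - offset) * scale };
}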
8850
8851 LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8852     armnn::IWorkloadFactory& workloadFactory,
8853     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8854 {
8855     // BatchSize: 1
8856     // Height: 3
8857     // Width: 2
8858     // Channels: 2
8859
8860     const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8861     std::vector<float> inputValues
8862     {
8863         // Batch 0, Height 0, Width (2) x Channel (2)
8864         1.f,  1.f,
8865         4.f,  1.f,
8866
8867         // Batch 0, Height 1, Width (2) x Channel (2)
8868         4.f,  4.f,
8869         2.f,  1.f,
8870
8871         // Batch 0, Height 2, Width (2) x Channel (2)
8872         1.f, -2.f,
8873         6.f,  4.f
8874     };
8875     std::vector<float> expectedOutputValues
8876     {
8877         // Batch 0, Height 0, Width (2) x Channel (2)
8878         1.f, 3.f,
8879         4.f, 3.f,
8880
8881         // Batch 0, Height 1, Width (2) x Channel (2)
8882         4.f, 4.f,
8883         2.f, 3.f,
8884
8885         // Batch 0, Height 2, Width (2) x Channel (2)
8886         1.f, 2.f,
8887         6.f, 4.f
8888     };
8889
8890     return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8891         workloadFactory, memoryManager,
8892         inputOutputShape, inputValues, expectedOutputValues,
8893         1.f/20.f, 50, armnn::DataLayout::NHWC);
8894 }
8895
8896 LayerTestResult<int16_t, 4> BatchNormInt16Test(
8897     armnn::IWorkloadFactory& workloadFactory,
8898     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8899 {
8900     // BatchSize: 1
8901     // Channels: 2
8902     // Height: 3
8903     // Width: 2
8904
8905     const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8906     std::vector<float> inputValues
8907     {
8908         // Batch 0, Channel 0, Height (3) x Width (2)
8909          1.f, 4.f,
8910          4.f, 2.f,
8911          1.f, 6.f,
8912
8913         // Batch 0, Channel 1, Height (3) x Width (2)
8914          1.f, 1.f,
8915          4.f, 1.f,
8916         -2.f, 4.f
8917     };
8918     std::vector<float> expectedOutputValues
8919     {
8920         // Batch 0, Channel 0, Height (3) x Width (2)
8921         1.f, 4.f,
8922         4.f, 2.f,
8923         1.f, 6.f,
8924
8925         // Batch 0, Channel 1, Height (3) x Width (2)
8926         3.f, 3.f,
8927         4.f, 3.f,
8928         2.f, 4.f
8929     };
8930
8931     return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8932         workloadFactory, memoryManager,
8933         inputOutputShape, inputValues, expectedOutputValues,
8934         1.f/20.f, 50, armnn::DataLayout::NCHW);
8935 }
8936
8937 LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
8938     armnn::IWorkloadFactory& workloadFactory,
8939     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8940 {
8941     // BatchSize: 1
8942     // Height: 3
8943     // Width: 2
8944     // Channels: 2
8945
8946     const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8947     std::vector<float> inputValues
8948     {
8949         // Batch 0, Height 0, Width (2) x Channel (2)
8950         1.f,  1.f,
8951         4.f,  1.f,
8952
8953         // Batch 0, Height 1, Width (2) x Channel (2)
8954         4.f,  4.f,
8955         2.f,  1.f,
8956
8957         // Batch 0, Height 2, Width (2) x Channel (2)
8958         1.f, -2.f,
8959         6.f,  4.f
8960     };
8961     std::vector<float> expectedOutputValues
8962     {
8963         // Batch 0, Height 0, Width (2) x Channel (2)
8964         1.f, 3.f,
8965         4.f, 3.f,
8966
8967         // Batch 0, Height 1, Width (2) x Channel (2)
8968         4.f, 4.f,
8969         2.f, 3.f,
8970
8971         // Batch 0, Height 2, Width (2) x Channel (2)
8972         1.f, 2.f,
8973         6.f, 4.f
8974     };
8975
8976     return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8977         workloadFactory, memoryManager,
8978         inputOutputShape, inputValues, expectedOutputValues,
8979         1.f/20.f, 50, armnn::DataLayout::NHWC);
8980 }
8981
8982 LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
8983     armnn::IWorkloadFactory& workloadFactory,
8984     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8985 {
8986     return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
8987 }
8988
8989 LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8990     armnn::IWorkloadFactory& workloadFactory,
8991     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8992 {
8993     return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8994 }
8995
8996 LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8997     armnn::IWorkloadFactory& workloadFactory,
8998     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8999 {
9000     return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9001 }
9002
9003 LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
9004     armnn::IWorkloadFactory& workloadFactory,
9005     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9006 {
9007     return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9008 }
9009
9010 LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
9011     armnn::IWorkloadFactory& workloadFactory,
9012     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9013 {
9014     return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9015 }
9016
9017 LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
9018     armnn::IWorkloadFactory& workloadFactory,
9019     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9020 {
9021     return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9022         workloadFactory, memoryManager, 0.5f, -1);
9023 }
9024
9025 LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
9026     armnn::IWorkloadFactory& workloadFactory,
9027     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9028 {
9029     return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9030         workloadFactory, memoryManager, 0.5f, -1);
9031 }
9032
9033 LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
9034     armnn::IWorkloadFactory& workloadFactory,
9035     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9036 {
9037     return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9038 }
9039
9040 LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
9041     armnn::IWorkloadFactory& workloadFactory,
9042     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9043 {
9044     return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9045 }
9046
9047 LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
9048     armnn::IWorkloadFactory& workloadFactory,
9049     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9050     bool useSubtensor)
9051 {
9052     return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9053         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
9054 }
9055
9056 LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
9057     armnn::IWorkloadFactory& workloadFactory,
9058     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9059 {
9060     return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
9061 }
9062
9063 LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
9064     armnn::IWorkloadFactory& workloadFactory,
9065     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9066 {
9067     return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9068         workloadFactory, memoryManager, 0.5f, -1);
9069 }
9070
9071 LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
9072     armnn::IWorkloadFactory& workloadFactory,
9073     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9074     bool useSubtensor)
9075 {
9076     return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9077         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
9078 }
9079
9080 LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
9081     armnn::IWorkloadFactory& workloadFactory,
9082     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9083 {
9084     return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9085 }
9086
9087 LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
9088     armnn::IWorkloadFactory& workloadFactory,
9089     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9090 {
9091     return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9092 }
9093
9094 LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
9095     armnn::IWorkloadFactory& workloadFactory,
9096     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9097 {
9098     return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9099 }
9100
9101 LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
9102     armnn::IWorkloadFactory& workloadFactory,
9103     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
9104 {
9105     return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9106         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
9107 }
9108
9109 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
9110     armnn::IWorkloadFactory& workloadFactory,
9111     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9112 {
9113     return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
9114         workloadFactory, memoryManager, 0.5f, -1);
9115 }
9116
9117 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
9118     armnn::IWorkloadFactory& workloadFactory,
9119     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9120 {
9121     return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
9122         workloadFactory, memoryManager, 0.5f, -1);
9123 }
9124
9125 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
9126     armnn::IWorkloadFactory& workloadFactory,
9127     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9128 {
9129     return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9130         workloadFactory, memoryManager, 0.5f, -1);
9131 }
9132
9133 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
9134     armnn::IWorkloadFactory& workloadFactory,
9135     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9136     bool useSubtensor)
9137 {
9138     return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9139         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
9140 }
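
// Illustrative sketch (not part of the original tests; assumes <array>): concatenation
// joins tensors along one axis; every other dimension must match, and the output extent
// along the concatenation axis is the sum of the input extents. Where it appears above,
// 'useSubtensor' chooses between copy-based concatenation and (on backends that support
// it) binding the inputs as sub-tensor views of the output.
static std::array<unsigned int, 4> ConcatShapeSketch(const std::array<unsigned int, 4>& a,
                                                     const std::array<unsigned int, 4>& b,
                                                     unsigned int axis)
{
    std::array<unsigned int, 4> result = a;
    result[axis] = a[axis] + b[axis]; // e.g. {1,1,2,3} and {1,1,2,3} along axis 3 -> {1,1,2,6}
    return result;
}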
9141
9142 LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
9143     armnn::IWorkloadFactory& workloadFactory,
9144     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9145     bool forceNoPadding)
9146 {
9147     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
9148         workloadFactory, memoryManager, forceNoPadding);
9149 }
9150
9151 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
9152     armnn::IWorkloadFactory& workloadFactory,
9153     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9154     bool forceNoPadding)
9155 {
9156     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
9157         workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
9158 }
9159
9160 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
9161     armnn::IWorkloadFactory& workloadFactory,
9162     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9163     bool forceNoPadding)
9164 {
9165     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
9166             workloadFactory, memoryManager, forceNoPadding);
9167 }
9168
9169 LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
9170     armnn::IWorkloadFactory& workloadFactory,
9171     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9172     bool forceNoPadding)
9173 {
9174     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
9175         workloadFactory, memoryManager, forceNoPadding);
9176 }
9177
9178 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
9179     armnn::IWorkloadFactory& workloadFactory,
9180     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9181     bool forceNoPadding)
9182 {
9183     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
9184         workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
9185 }
9186
9187 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
9188     armnn::IWorkloadFactory& workloadFactory,
9189     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9190     bool forceNoPadding)
9191 {
9192     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
9193             workloadFactory, memoryManager, forceNoPadding);
9194 }
9195
9196 LayerTestResult<float, 4> SimpleMaxPooling2dTest(
9197     armnn::IWorkloadFactory& workloadFactory,
9198     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9199     const armnn::DataLayout dataLayout)
9200 {
9201     return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
9202 }
9203
9204 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
9205     armnn::IWorkloadFactory& workloadFactory,
9206     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9207     const armnn::DataLayout dataLayout)
9208 {
9209     return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
9210 }
9211
9212 LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
9213     armnn::IWorkloadFactory& workloadFactory,
9214     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9215     const armnn::DataLayout dataLayout)
9216 {
9217     return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9218 }
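
// Illustrative sketch (not part of the original tests): the pooling tests in this file
// rely on the usual output-extent rule, out = (in + padTotal - pool) / stride + 1, with
// floor division (ArmNN also offers ceiling rounding via Pooling2dDescriptor's
// m_OutputShapeRounding). E.g. a 4-wide input, 2-wide pool, stride 2 and no padding
// gives (4 + 0 - 2) / 2 + 1 = 2.
static unsigned int PooledExtentSketch(unsigned int in, unsigned int pool,
                                       unsigned int stride, unsigned int padTotal)
{
    return (in + padTotal - pool) / stride + 1;
}
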
9219 LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
9220     armnn::IWorkloadFactory& workloadFactory,
9221     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9222 {
9223     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9224 }
9225
9226 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
9227     armnn::IWorkloadFactory& workloadFactory,
9228     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9229 {
9230     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9231             workloadFactory, memoryManager, 1.0f, -5);
9232 }
9233
9234 LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
9235     armnn::IWorkloadFactory& workloadFactory,
9236     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9237 {
9238     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9239             workloadFactory, memoryManager);
9240 }
9241
9242 LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
9243     armnn::IWorkloadFactory& workloadFactory,
9244     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9245 {
9246     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9247 }
9248
9249 LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
9250     armnn::IWorkloadFactory& workloadFactory,
9251     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9252 {
9253     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9254             workloadFactory, memoryManager, 1.0f, -5);
9255 }
9256
9257 LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
9258     armnn::IWorkloadFactory& workloadFactory,
9259     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9260 {
9261     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9262             workloadFactory, memoryManager);
9263 }
9264
9265 LayerTestResult<float, 4> SimpleAveragePooling2dTest(
9266     armnn::IWorkloadFactory& workloadFactory,
9267     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9268     const armnn::DataLayout dataLayout)
9269 {
9270     return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
9271 }
9272
9273 LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
9274     armnn::IWorkloadFactory& workloadFactory,
9275     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9276     const armnn::DataLayout dataLayout)
9277 {
9278     return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9279         workloadFactory, memoryManager, dataLayout, 0.5, -1);
9280 }
9281
9282 LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
9283     armnn::IWorkloadFactory& workloadFactory,
9284     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9285     const armnn::DataLayout dataLayout)
9286 {
9287     return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9288             workloadFactory, memoryManager, dataLayout);
9289 }
9290
9291 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
9292     armnn::IWorkloadFactory& workloadFactory,
9293     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9294     bool forceNoPadding)
9295 {
9296     return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
9297         workloadFactory, memoryManager, forceNoPadding);
9298 }
9299
9300 LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
9301     armnn::IWorkloadFactory& workloadFactory,
9302     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9303 {
9304     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9305 }
9306
9307 LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
9308     armnn::IWorkloadFactory& workloadFactory,
9309     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9310 {
9311     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9312         workloadFactory, memoryManager, 0.5, -1);
9313 }
9314
9315 LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
9316     armnn::IWorkloadFactory& workloadFactory,
9317     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9318 {
9319     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9320             workloadFactory, memoryManager);
9321 }
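
// Illustrative sketch (not part of the original tests): the "IgnorePadding" tests below
// exercise armnn::PaddingMethod::IgnoreValue, where the padding fields count towards the
// divisor of an average while contributing nothing to the sum, whereas
// PaddingMethod::Exclude divides only by the number of real elements. For a window
// holding {4, 6} plus two padding fields: IgnoreValue -> (4 + 6) / 4 = 2.5,
// Exclude -> (4 + 6) / 2 = 5.
static float AverageWithPaddingSketch(const std::vector<float>& realValues,
                                      size_t paddedCount, bool ignoreValue)
{
    float sum = 0.0f;
    for (float v : realValues) { sum += v; }
    const size_t divisor = ignoreValue ? realValues.size() + paddedCount : realValues.size();
    return sum / static_cast<float>(divisor);
}
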
9322 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
9323     armnn::IWorkloadFactory& workloadFactory,
9324     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9325 {
9326     return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9327 }
9328
9329 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
9330     armnn::IWorkloadFactory& workloadFactory,
9331     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9332 {
9333     return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9334             workloadFactory, memoryManager);
9335 }
9336
9337 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
9338     armnn::IWorkloadFactory& workloadFactory,
9339     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9340 {
9341     return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9342             workloadFactory, memoryManager);
9343 }
9344
9345 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
9346     armnn::IWorkloadFactory& workloadFactory,
9347     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9348 {
9349     return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
9350             workloadFactory, memoryManager);
9351 }
9352
9353 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
9354     armnn::IWorkloadFactory& workloadFactory,
9355     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9356 {
9357     return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
9358             workloadFactory, memoryManager);
9359 }
9360
9361 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
9362     armnn::IWorkloadFactory& workloadFactory,
9363     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9364 {
9365     return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
9366             workloadFactory, memoryManager);
9367 }
9368
9369 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
9370     armnn::IWorkloadFactory& workloadFactory,
9371     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9372 {
9373     return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9374 }
9375
9376 LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
9377     armnn::IWorkloadFactory& workloadFactory,
9378     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9379 {
9380     return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9381             workloadFactory, memoryManager);
9382 }
9383
9384 LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
9385     armnn::IWorkloadFactory& workloadFactory,
9386     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9387 {
9388     return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9389             workloadFactory, memoryManager);
9390 }
9391
9392 LayerTestResult<float, 4> SimpleL2Pooling2dTest(
9393     armnn::IWorkloadFactory& workloadFactory,
9394     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9395     const armnn::DataLayout dataLayout)
9396 {
9397     return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
9398 }
9399
9400 LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
9401     armnn::IWorkloadFactory& workloadFactory,
9402     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9403     const armnn::DataLayout dataLayout)
9404 {
9405     return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
9406 }
9407
9408 LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
9409     armnn::IWorkloadFactory& workloadFactory,
9410     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9411     const armnn::DataLayout dataLayout)
9412 {
9413     return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9414 }
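
// Illustrative sketch (not part of the original tests; assumes <cmath>): L2 pooling emits
// the root of the mean of squares over each window, out = sqrt(sum(x_i^2) / N); e.g. a
// 2x2 window of {1, 7, 1, 7} pools to sqrt(100 / 4) = 5.
static float L2PoolSketch(const std::vector<float>& window)
{
    float sumSq = 0.0f;
    for (float v : window) { sumSq += v * v; }
    return std::sqrt(sumSq / static_cast<float>(window.size()));
}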
9415
9416 LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
9417     armnn::IWorkloadFactory& workloadFactory,
9418     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9419 {
9420     return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9421 }
9422
9423 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
9424     armnn::IWorkloadFactory& workloadFactory,
9425     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9426 {
9427     return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9428 }
9429
9430 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
9431     armnn::IWorkloadFactory& workloadFactory,
9432     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9433 {
9434     return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9435 }
9436
9437 LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
9438     armnn::IWorkloadFactory& workloadFactory,
9439     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9440 {
9441     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9442 }
9443
9444 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
9445     armnn::IWorkloadFactory& workloadFactory,
9446     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9447 {
9448     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9449 }
9450
9451 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
9452     armnn::IWorkloadFactory& workloadFactory,
9453     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9454 {
9455     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9456 }

9457 LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
9458     armnn::IWorkloadFactory& workloadFactory,
9459     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9460 {
9461     return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9462 }
9463
9464 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
9465     armnn::IWorkloadFactory& workloadFactory,
9466     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9467 {
9468     return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9469 }
9470
9471 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
9472     armnn::IWorkloadFactory& workloadFactory,
9473     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9474 {
9475     return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9476 }
9477
9478 LayerTestResult<float, 4> L2Pooling2dSize7Test(
9479     armnn::IWorkloadFactory& workloadFactory,
9480     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9481 {
9482     return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9483 }
9484
9485 LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
9486     armnn::IWorkloadFactory& workloadFactory,
9487     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9488 {
9489     return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9490 }
9491
9492 LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
9493     armnn::IWorkloadFactory& workloadFactory,
9494     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9495 {
9496     return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9497 }
9498
9499 LayerTestResult<float, 4> L2Pooling2dSize9Test(
9500     armnn::IWorkloadFactory& workloadFactory,
9501     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9502 {
9503     return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9504 }
9505
9506 LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
9507     armnn::IWorkloadFactory& workloadFactory,
9508     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9509 {
9510     return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9511 }
9512
9513 LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
9514     armnn::IWorkloadFactory& workloadFactory,
9515     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9516 {
9517     return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9518 }

9519 LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
9520     armnn::IWorkloadFactory& workloadFactory,
9521     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9522 {
9523     return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9524 }
9525
9526 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
9527     armnn::IWorkloadFactory& workloadFactory,
9528     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9529 {
9530     return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9531 }
9532
9533 LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
9534     armnn::IWorkloadFactory& workloadFactory,
9535     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9536 {
9537     return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9538 }
9539
9540 LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
9541     armnn::IWorkloadFactory& workloadFactory,
9542     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9543 {
9544     return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9545 }
9546
9547 LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
9548     armnn::IWorkloadFactory& workloadFactory,
9549     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9550 {
9551     return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9552 }
9553
9554 LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
9555     armnn::IWorkloadFactory& workloadFactory,
9556     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9557 {
9558     return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9559 }
9560
9561 LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
9562     armnn::IWorkloadFactory& workloadFactory,
9563     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9564 {
9565     return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9566 }
9567
9568 LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
9569     armnn::IWorkloadFactory& workloadFactory,
9570     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9571 {
9572     return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9573 }
9574
9575 LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
9576     armnn::IWorkloadFactory& workloadFactory,
9577     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9578 {
9579     return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9580 }
9581
9582 LayerTestResult<float, 4> ComparePooling2dTest(
9583     armnn::IWorkloadFactory& workloadFactory,
9584     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9585     armnn::IWorkloadFactory& refWorkloadFactory,
9586     armnn::PoolingAlgorithm  poolingType)
9587 {
9588     return ComparePooling2dTestCommon<armnn::DataType::Float32>(
9589         workloadFactory, memoryManager, refWorkloadFactory, poolingType);
9590 }
9591
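// The uint8 variant below forwards two extra arguments, 0.1f and 128, which are
// taken to be the quantisation scale and offset parameters of
// ComparePooling2dTestCommon for the quantised tensors.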
LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}

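// Chains a MaxPool workload into an Addition workload through a shared output
// tensor handle, checking that one workload's result can feed another directly.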
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool with poolSize = 1x1 and stride = 2x2.
    // Result:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    // Create an addition with a second tensor of the same size. This is the
    // result of applying a Conv2d with a 2x2 kernel of ones and stride 1x1
    // to the initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
                                                                    {12, 16,
                                                                     24, 28
                                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                    13, 19,
                    31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    // Read back the intermediate MaxPool result only after the pooling workload
    // has run; the addition consumes poolingOutputHandle directly, so this copy
    // is purely for reference.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}

LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

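// Note: despite the 'Uint16' suffix, the following tests exercise the signed
// QuantisedSymm16 data type, as the int16_t result type indicates.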
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

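// The SpaceToDepth NHWC variants below omit the data-layout argument and rely
// on the test implementation's default (assumed to be NHWC); the NCHW variants
// pass armnn::DataLayout::NCHW explicitly.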
LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Gather1DParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}

LayerTestResult<float, 4> DequantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> DequantizeOffsetUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> DequantizeSimpleInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

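// The Clamp variants below are understood (from the test names) to check
// quantisation behaviour at the limits of the target type's representable
// range, i.e. that out-of-range values saturate rather than wrap.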
LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}