//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "LayerTests.hpp"
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
#include <ResolveType.hpp>

#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
#include "Permute.hpp"

#include <boost/test/unit_test.hpp>
#include <boost/assert.hpp>

#include <armnn/LayerSupport.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <algorithm>
#include <boost/cast.hpp>

#include "Conv2dTestImpl.hpp"
#include "BatchNormTestImpl.hpp"
#include "ActivationTestImpl.hpp"
#include "Pooling2dTestImpl.hpp"
#include "FullyConnectedTestImpl.hpp"
#include "GatherTestImpl.hpp"
#include "SpaceToBatchNdTestImpl.hpp"
#include "SpaceToDepthTestImpl.hpp"
#include "SplitterTestImpl.hpp"
#include "SoftmaxTestImpl.hpp"
#include "StridedSliceTestImpl.hpp"
#include "NormTestImpl.hpp"
#include "LstmTestImpl.hpp"
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"
#include "DebugTestImpl.hpp"
#include "DequantizeTestImpl.hpp"
#include "QuantizeTestImpl.hpp"
#include "TransposeConvolution2dTestImpl.hpp"

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
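// Laid out to match the NCHW shape {1, 3, 8, 16}: three consecutive 8x16 planes.
// Plane 0 is 0.5 everywhere except an all-zero second row, plane 1 has a single
// vertical line of 1s in column 2, and plane 2 is all -1s.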
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

struct Simple3dSoftmaxOutputData
{
    const std::vector<float> outputData =
            {
                0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
                0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
            };

    const armnn::TensorShape inputShape{ 1, 8, 1 };

    const std::vector<float> inputData =
            {
                    0.f, 1.f, 0.f, 0.f,
                    .5f, 0.f, 0.f, 0.f,
            };
};

struct Simple4dSoftmaxData
{
    const armnn::TensorShape inputShape{ 1, 8, 1, 1 };

    const std::vector<float> outputData = { 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
                                            0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f };
    const std::vector<float> inputData =
            {
                    0.f, 1.f, 0.f, 0.f,
                    .5f, 0.f, 0.f, 0.f
            };
};

// Helper function that returns either Bias2 or an empty array, depending on whether bias is enabled.
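// Callers pass qScale * qScale as the bias quantization scale, following the usual
// quantization convention biasScale = inputScale * weightScale (both are qScale here).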
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
{
    if(biasEnabled)
    {
        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
        return bias;
    }
    else
    {
        return boost::multi_array<T, 1>();
    }
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel 4x3 image (NHWC).
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                       1, 5, 2, 3,
                                                       8, 7, 3, 6,
                                                       3, 3, 9, 1
                                                       });

    // Use 1 batch of a 1-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                    4, 5, 6,
                                                                    0, 0, 0,
                                                                    3, 2, 1
                                                                    });

    // Expected output is a single-batch 1-channel 4x3 image (NHWC).
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
            {
                    23, 41, 33, 21,
                    44, 65, 76, 52,
                    82, 85, 79, 42
            };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        float qScale,
        int32_t qOffset,
        bool biasEnabled,
        const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
            {
                1, 5, 2, 3, 5,
                8, 7, 3, 6, 3,
                3, 3, 9, 1, 9,
                4, 1, 8, 1, 3,
                6, 8, 1, 9, 2
            });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
            {
                4, 5, 6,
                0, 0, 0,
                3, 2, 1
            });

    // Expected output is a single-batch, 1 channel, 3x3 image.
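    // Output size check: (I - K + P_total)/S + 1 = (5 - 3 + 2)/2 + 1 = 3 per dimension,
    // given 1 pixel of padding on each side and a stride of 2.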
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
            {
                23, 33, 24,
                91, 99, 48,
                26, 50, 19
            };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX  = 2;
    uint32_t strideY  = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}

LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        biasEnabled,
        armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        0.f,
        0,
        biasEnabled,
        layout);
}

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
            workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

    // Expected output is 1 batch of a 1-channel 6x8 image.
    // Manually calculated like this:
    //[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
    //[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
    //[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
    //[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
    //[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
    //[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
    //[..... .....  ..... .....  ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,    0,      0,    0,    0,    0,
            -242,  -594,  -934, -372,    0,    0,
            -495, -1190, -1850, -725,    0,    0,
            -538, -1256, -1916, -748,    0,    0,
            -273, -626,  -946,  -363,    0,    0,
               0,    0,     0,     0,    0,    0,
               0,    0,     0,     0,    0,    0,
               0,    0,     0,     0,    0,    0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,41,51,
            12,22,32,42,52,
            13,23,33,43,53,
            14,24,34,44,54,
            15,25,35,45,55,
        })));

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,-31,-41,
            -12,-22,-32,-42,
            -13,-23,-33,-43,
            -14,-24,-34,-44,
        })));

    // Expected output is 1 batch of a 1-channel 5x5 image.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -7140, -10580, -13940,  -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144,  -9318, -5152,
            -5032,  -7256,  -9376,  -6142, -3368,
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2); // Padding bottom.
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout layout)
{
    return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout layout)
{
    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
            <armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, layout, 0.0f, 0);
}

LayerTestResult<float, 4> Convolution1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
            workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}

LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
            workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
}

LayerTestResult<float,4> CompareConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
            workloadFactory, memoryManager, refWorkloadFactory);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX  = 1,
    uint32_t strideY  = 1,
    bool biasEnabled = false
)
{
    float qScale;
    int32_t qOffset;
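    // Pick per-type quantization parameters: QuantisedAsymm8 uses a mid-range
    // offset of 128 so that negative values remain representable, QuantisedSymm16
    // is symmetric so its offset stays 0, and Float32 ignores both.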
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            input,
            kernel,
            GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
            expectedOutput,
            qScale,
            qOffset,
            layout,
            padLeft,
            padTop,
            padRight,
            padBottom,
            strideX,
            strideY,
            dilationX,
            dilationY);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this dilates the kernel to an effective 7x7,
    // so the output will be 4x4: (I - K + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4.
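    // Worked through: the dilated kernel spans d * (K - 1) + 1 = 3 * (3 - 1) + 1 = 7 pixels.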
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,
            3,
            layout,
            0,  // Padding and strides are passed explicitly (at their default values)
            0,  // so that biasEnabled binds to the biasEnabled parameter instead of
            0,  // being implicitly converted to padLeft.
            0,
            1,
            1,
            biasEnabled);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this dilates the kernel to an effective 7x7,
    // so the output will be 4x4: (I - K + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4.
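    // A Convolution2d sums over its input channels, and both channels here hold the
    // same data and filter, so every expected value is exactly twice the
    // single-channel result of Convolution2d3x3Dilation3x3Test above.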
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
         6.,  4.,  4.,  4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,
            3,
            layout,
            0,  // Explicit default padding and strides, as above, so that
            0,  // biasEnabled reaches the biasEnabled parameter.
            0,
            0,
            1,
            1,
            biasEnabled);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2,
        3, 4
    };

    // Since the dilation rate is 2 this dilates the kernel to an effective 3x3: d(K-1)+1 => 2 x (2-1) + 1 = 3.
    // With input size I = 10, stride S = 3 and 1 pixel of padding on each side (2 in total per dimension),
    // the output will be 4x4: trunc((I - K_dilated + P_total)/S) + 1 => trunc((10 - 3 + 2)/3) + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        4,  7,  7, 3,
        6, 10, 10, 4,
        6, 10, 10, 4,
        2,  3,  3, 1
    };
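    // Spot check: for the top-left output element only the bottom-right kernel tap
    // (weight 4) falls inside the image, hence the 4 in that corner.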
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            2,
            2,
            layout,
            padLeft,
            padTop,
            padRight,
            padBottom,
            3,
            3,
            biasEnabled);
}

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
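    // Note: the depthwise weights here are assumed to be laid out as { M, I, H, W } =
    // { 1, 2, 4, 4 }, i.e. a depth multiplier of 1 applied to 2 input channels,
    // giving 2 output channels.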
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the Python TensorFlow library with strideX=1, strideY=1.
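    // The asymmetric padding used below (1 before, 2 after in each dimension) is
    // what TensorFlow's 'SAME' padding yields for a 4x4 kernel at stride 1 on a
    // 5x5 input, which keeps the output at 5x5.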
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
             32, 31, 30, 29,
             28, 27, 26, 25,
             24, 23, 22, 21,
             20, 19, 18, 17,

             16, 15, 14, 13,
             12, 11, 10,  9,
              8,  7,  6,  5,
              4,  3,  2,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1);  // strideY
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
             1, 2, 3,
             4, 5, 6,
             7, 8, 9
        })));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;
    uint32_t dilationX  = 3;
    uint32_t dilationY  = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
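    // With no padding the dilated 7x7 footprint fits 3 times along each axis, and at
    // every output position exactly one kernel tap (the centre, weight 5) lands on
    // the 3x3 block of ones, so every output element is 5.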
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
             5, 5, 5,
             5, 5, 5,
             5, 5, 5
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const std::vector<float>& inputNoQuantizedValues,
        armnn::TensorInfo& inputTensorInfo,
        const std::vector<float>& kernelNoQuantizedValues,
        armnn::TensorInfo& kernelTensorInfo,
        const std::vector<float>& outputExpectedNoQuantizedValues,
        armnn::TensorInfo& outputTensorInfo,
        uint32_t dilationX,
        uint32_t dilationY,
        armnn::DataLayout layout = armnn::DataLayout::NCHW,
        bool biasEnabled = false)
{
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            input,
            kernel,
            GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
            expectedOutput,
            qScale,
            qOffset,
            layout,
            padLeft,
            padTop,
            padRight,
            padBottom,
            strideX,
            strideY,
            dilationX,
            dilationY);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
            {
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0
            };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
            {
                    1, 2, 3,
                    4, 5, 6,
                    7, 8, 9
            };

    // Since the dilation rate is 3 this dilates the kernel to an effective 7x7,
    // so the output will be 4x4: (I - K + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
            {
                    6., 5., 5., 5.,
                    6., 5., 5., 5.,
                    6., 5., 5., 5.,
                    3., 2., 2., 2.
            };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
            workloadFactory,
            memoryManager,
            inputNoQuantizedValues,
            inputTensorInfo,
            kernelNoQuantizedValues,
            kernelTensorInfo,
            outputExpectedNoQuantizedValues,
            outputTensorInfo,
            3,
            3,
            layout,
            biasEnabled);
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
            {
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0
            };

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
            {
                    1, 2, 3,
                    4, 5, 6,
                    7, 8, 9,

                    1, 2, 3,
                    4, 5, 6,
                    7, 8, 9
            };

1426     // With a dilation rate of 3 the 3x3 kernel again covers an effective extent of 3*(3-1)+1 = 7,
1427     // so each of the 2 channels gives a 4x4 output, i.e. 2x4x4: (10 - 7 + 0)/1 + 1 = 4.
1428     armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
1429     std::vector<float> outputExpectedNoQuantizedValues =
1430             {
1431                     6., 5., 5., 5.,
1432                     6., 5., 5., 5.,
1433                     6., 5., 5., 5.,
1434                     3., 2., 2., 2.,
1435
1436                     6., 5., 5., 5.,
1437                     6., 5., 5., 5.,
1438                     6., 5., 5., 5.,
1439                     3., 2., 2., 2.
1440             };
1441
1442     return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1443             workloadFactory,
1444             memoryManager,
1445             inputNoQuantizedValues,
1446             inputTensorInfo,
1447             kernelNoQuantizedValues,
1448             kernelTensorInfo,
1449             outputExpectedNoQuantizedValues,
1450             outputTensorInfo,
1451             3,
1452             3,
1453             layout,
1454             biasEnabled);
1455 }
1456
1457
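// Explicit instantiations of the dilation tests above: they force the compiler
// to emit the Float32, QuantisedAsymm8 and QuantisedSymm16 variants in this
// translation unit, so that code which only sees the declarations still links.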
1458 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
1459 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
1460         armnn::IWorkloadFactory&,
1461         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1462         bool,
1463         armnn::DataLayout);
1464
1465 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
1466 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1467         armnn::IWorkloadFactory&,
1468         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1469         bool,
1470         armnn::DataLayout);
1471
1472 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
1473 DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1474         armnn::IWorkloadFactory&,
1475         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1476         bool,
1477         armnn::DataLayout);
1478
1479 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
1480 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
1481         armnn::IWorkloadFactory&,
1482         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1483         bool,
1484         armnn::DataLayout);
1485
1486 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
1487 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1488         armnn::IWorkloadFactory&,
1489         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1490         bool,
1491         armnn::DataLayout);
1492
1493 template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
1494 DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1495         armnn::IWorkloadFactory&,
1496         const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
1497         bool,
1498         armnn::DataLayout);
1499
1500 LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1501     armnn::IWorkloadFactory& workloadFactory,
1502     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1503     bool biasEnabled,
1504     const armnn::DataLayout layout)
1505 {
1506     return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1507         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
1508 }
1509
1510 LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1511     armnn::IWorkloadFactory& workloadFactory,
1512     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1513     bool biasEnabled)
1514 {
1515     return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1516         workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
1517 }
1518
1519 LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1520     armnn::IWorkloadFactory& workloadFactory,
1521     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1522     bool biasEnabled,
1523     const armnn::DataLayout layout)
1524 {
1525     return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1526         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
1527 }
1528
1529 LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
1530     armnn::IWorkloadFactory& workloadFactory,
1531     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1532 {
1533     armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
1534     auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
1535
1536     std::vector<float> kernelData;
1537     std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
1538     for (unsigned int i = 0; i < 64; ++i)
1539     {
1540         kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
1541     }
1542     armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
1543     auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
1544
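    // Each 2x2 kernel channel { 1, -1, -1, 1 } applied to the 2x2 input
    // { 1, 2, 3, 4 } gives 1 - 2 - 3 + 4 = 0, so all 64 output channels are zero.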
1545     std::vector<float> expectedOutputData(64, 0.f);
1546     armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
1547     auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
1548
1549     return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1550             workloadFactory,
1551             memoryManager,
1552             input,
1553             kernel,
1554             boost::multi_array<float, 1>(),
1555             expectedOutput,
1556             0.f,
1557             0,
1558             armnn::DataLayout::NCHW);
1559 }
1560
1561 LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1562     armnn::IWorkloadFactory& workloadFactory,
1563     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1564     bool biasEnabled,
1565     const armnn::DataLayout layout)
1566 {
1567     return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1568         workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
1569 }
1570
1571 LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1572     armnn::IWorkloadFactory& workloadFactory,
1573     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1574     bool biasEnabled,
1575     const armnn::DataLayout layout)
1576 {
1577     return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1578         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1579 }
1580
1581 LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1582     armnn::IWorkloadFactory& workloadFactory,
1583     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1584     bool biasEnabled,
1585     const armnn::DataLayout layout)
1586 {
1587     return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
1588         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1589 }
1590
1591 LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1592         armnn::IWorkloadFactory& workloadFactory,
1593         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1594 {
1595     return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1596             workloadFactory,
1597             memoryManager,
1598             0.f,
1599             0,
1600             false);
1601 }
1602
1603 LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1604         armnn::IWorkloadFactory& workloadFactory,
1605         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1606         bool biasEnabled,
1607         const armnn::DataLayout layout)
1608 {
1609     return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1610         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1611 }
1612
1613 LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1614                 armnn::IWorkloadFactory& workloadFactory,
1615                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1616                 bool biasEnabled,
1617                 const armnn::DataLayout layout)
1618 {
1619     return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1620         workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1621 }
1622
1623 LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
1624     armnn::IWorkloadFactory& workloadFactory,
1625     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1626     armnn::IWorkloadFactory& refWorkloadFactory,
1627     const armnn::DataLayout layout)
1628 {
1629     return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1630         workloadFactory, memoryManager, refWorkloadFactory, layout);
1631 }
1632
1633 LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1634     armnn::IWorkloadFactory& workloadFactory,
1635     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1636     armnn::IWorkloadFactory& refWorkloadFactory,
1637     const armnn::DataLayout layout)
1638 {
1639     return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1640         workloadFactory, memoryManager, refWorkloadFactory, layout);
1641 }
1642
1643 LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1644     armnn::IWorkloadFactory& workloadFactory,
1645     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1646 {
1647     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1648     auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
1649     return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
1650 }
1651
1652 LayerTestResult<float,4> SimpleNormalizationWithinTest(
1653     armnn::IWorkloadFactory& workloadFactory,
1654     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1655 {
1656     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1657     auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
1658     return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
1659 }
1660
1661 LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1662     armnn::IWorkloadFactory& workloadFactory,
1663     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1664 {
1665     auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1666     auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
1667     return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
1668 }
1669
1670 LayerTestResult<float,2> SimpleSoftmaxTest(
1671     armnn::IWorkloadFactory& workloadFactory,
1672     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1673     float beta)
1674 {
1675     return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1676 }
1677
1678 LayerTestResult<float,2> SimpleAxisSoftmaxTest(
1679         armnn::IWorkloadFactory& workloadFactory,
1680         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1681         float beta,
1682         int axis)
1683 {
1684     return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
1685 }
1686
1687 LayerTestResult<float,3> Simple3dSoftmaxTest(
1688         armnn::IWorkloadFactory& workloadFactory,
1689         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1690         float beta)
1691 {
1692     Simple3dSoftmaxOutputData data;
1693     return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1694                                                              data.inputShape, data.outputData, data.inputData);
1695 }
1696
1697 LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
1698         armnn::IWorkloadFactory& workloadFactory,
1699         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1700         float beta,
1701         int axis)
1702 {
1703     armnn::TensorShape inputShape;
1704     std::vector<float> inputData;
1705     std::vector<float> outputData;
1706     switch (axis)
1707     {
1708     case -3:
1709     case 0:
1710         {
1711             inputShape = {5, 2, 2};
1712
1713             inputData =
1714                     {
1715                             17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1716
1717                             15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1718                     };
1719
1720             outputData =
1721                     {
1722                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1723                             0.236882800924671f,
1724                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1725                             0.087144312427294f,
1726
1727                             0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1728                             0.032058600957022f,
1729                             0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1730                             7.246299848982885e-08f
1731                     };
1732             break;
1733         }
1734     case -2:
1735     case 1:
1736         {
1737             inputShape = {2, 5, 2};
1738
1739             inputData =
1740                     {
1741                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1742
1743                             17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1744                     };
1745
1746             outputData =
1747                     {
1748                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1749                             0.087144312427294f,
1750                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1751                             7.246299848982885e-08f,
1752
1753                             0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1754                             0.087144312427294f,
1755                             0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1756                             7.246299848982885e-08f
1757                     };
1758         break;
1759         }
1760     case -1:
1761     case 2:
1762         {
1763             inputShape = {2, 2, 5};
1764
1765             inputData =
1766                     {
1767                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1768                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1769                     };
1770
1771             outputData =
1772                     {
1773                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1774                             7.246299848982885e-08f,
1775                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1776                             7.246299848982885e-08f,
1777
1778                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1779                             7.246299848982885e-08f,
1780                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1781                             7.246299848982885e-08f
1782                     };
1783             break;
1784         }
1785     }
1786
1787     return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1788                                                              inputShape, outputData, inputData, axis);
1789 }
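// A small self-contained sketch (hypothetical helper, not used by the tests)
// showing where the expected values above come from; it assumes <cmath> and
// <vector> are reachable through the includes at the top of this file.
// Softmax over { 17, 16, 15, 14, 1 } with beta = 1 yields approximately
// { 0.643914f, 0.236883f, 0.087144f, 0.032059f, 7.2463e-08f }; the rows
// { -1, -2, -3, -4, -17 } produce the same distribution, since they differ
// only by a constant shift of -18.
namespace
{

std::vector<float> NaiveSoftmax(const std::vector<float>& logits, float beta)
{
    // Shift by the maximum for numerical stability; the result is unchanged
    // because softmax is invariant to adding a constant to every input.
    const float maxLogit = *std::max_element(logits.begin(), logits.end());

    std::vector<float> result(logits.size());
    float sum = 0.0f;
    for (std::size_t i = 0; i < logits.size(); ++i)
    {
        result[i] = std::exp(beta * (logits[i] - maxLogit));
        sum += result[i];
    }
    for (float& value : result)
    {
        value /= sum;
    }
    return result;
}

} // anonymous namespace
// Example: NaiveSoftmax({ 17.f, 16.f, 15.f, 14.f, 1.f }, 1.0f)[0] ~= 0.643914f.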
1790
1791 LayerTestResult<float,4> Simple4dSoftmaxTest(
1792         armnn::IWorkloadFactory& workloadFactory,
1793         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1794         float beta)
1795 {
1796     Simple4dSoftmaxData data;
1797     return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
1798                                                              data.outputData, data.inputData);
1799 }
1800
1801 LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
1802         armnn::IWorkloadFactory& workloadFactory,
1803         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1804         float beta,
1805         int axis)
1806 {
1807     armnn::TensorShape inputShape;
1808     std::vector<float> inputData;
1809     std::vector<float> outputData;
1810     switch (axis)
1811     {
1812     case -4:
1813     case 0:
1814         {
1815             inputShape = {5, 2, 2, 2};
1816
1817             inputData =
1818                     {
1819                             17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
1820                             16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
1821                             15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
1822                             14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
1823                     };
1824
1825             outputData =
1826                     {
1827                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1828                             0.643914213228014f,
1829                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
1830                             0.236882800924671f,
1831                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
1832                             0.236882800924671f,
1833                             0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1834                             0.087144312427294f,
1835
1836                             0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1837                             0.032058600957022f,
1838                             0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
1839                             0.032058600957022f,
1840                             0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1841                             7.246299848982885e-08f,
1842                             7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1843                             7.246299848982885e-08f, 7.246299848982885e-08f
1844                     };
1845             break;
1846         }
1847     case -3:
1848     case 1:
1849         {
1850             inputShape = {2, 5, 2, 2};
1851
1852             inputData =
1853                     {
1854                             17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1855                             15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
1856                             17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1857                             15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1858                     };
1859
1860             outputData =
1861                     {
1862                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1863                             0.236882800924671f,
1864                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1865                             0.087144312427294f,
1866                             0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1867                             0.032058600957022f,
1868                             0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1869                             7.246299848982885e-08f,
1870
1871
1872                             0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1873                             0.236882800924671f,
1874                             0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1875                             0.087144312427294f,
1876                             0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1877                             0.032058600957022f,
1878                             0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1879                             7.246299848982885e-08f
1880                     };
1881             break;
1882         }
1883     case -2:
1884     case 2:
1885         {
1886         inputShape = {2, 2, 5, 2};
1887
1888         inputData =
1889                 {
1890                         17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1891                         17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1892                         17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1893                         17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1894                 };
1895
1896         outputData =
1897                 {
1898                         0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1899                         0.087144312427294f,
1900                         0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1901                         7.246299848982885e-08f,
1902                         0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1903                         0.087144312427294f,
1904                         0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1905                         7.246299848982885e-08f,
1906
1907                         0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1908                         0.087144312427294f,
1909                         0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1910                         7.246299848982885e-08f,
1911                         0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1912                         0.087144312427294f,
1913                         0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1914                         7.246299848982885e-08f
1915                 };
1916         break;
1917         }
1918     case -1:
1919     case 3:
1920         {
1921             inputShape = {2, 2, 2, 5};
1922
1923             inputData =
1924                     {
1925                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1926                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1927                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1928                             17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1929                     };
1930
1931             outputData =
1932                     {
1933                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1934                             7.246299848982885e-08f,
1935                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1936                             7.246299848982885e-08f,
1937                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1938                             7.246299848982885e-08f,
1939                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1940                             7.246299848982885e-08f,
1941
1942                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1943                             7.246299848982885e-08f,
1944                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1945                             7.246299848982885e-08f,
1946                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1947                             7.246299848982885e-08f,
1948                             0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1949                             7.246299848982885e-08f
1950                     };
1951             break;
1952         }
1953     }
1954
1955     return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, inputShape,
1956                                                              outputData, inputData, axis);
1957 }
1958
1959 LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1960     armnn::IWorkloadFactory& workloadFactory,
1961     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1962     float beta)
1963 {
1964     return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1965 }
1966
1967 LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1968         armnn::IWorkloadFactory& workloadFactory,
1969         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1970         float beta)
1971 {
1972     Simple3dSoftmaxOutputData data;
1973     return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1974                                                                      data.inputShape, data.outputData, data.inputData);
1975 }
1976
1977 LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1978         armnn::IWorkloadFactory& workloadFactory,
1979         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1980         float beta)
1981 {
1982     Simple4dSoftmaxData data;
1983
1984     return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1985                                                                      data.inputShape, data.outputData, data.inputData);
1986 }
1987
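// Note: despite the "Uint16" naming below, QuantisedSymm16 data is signed,
// hence the int16_t element type in the results.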
1988 LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1989         armnn::IWorkloadFactory& workloadFactory,
1990         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1991         float beta)
1992 {
1993     return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1994 }
1995
1996 LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1997         armnn::IWorkloadFactory& workloadFactory,
1998         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1999         float beta)
2000 {
2001     Simple3dSoftmaxOutputData data;
2002     return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2003                                                                      data.inputShape, data.outputData, data.inputData);
2004 }
2005
2006 LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
2007         armnn::IWorkloadFactory& workloadFactory,
2008         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2009         float beta)
2010 {
2011     Simple4dSoftmaxData data;
2012
2013     return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2014                                                                      data.inputShape, data.outputData, data.inputData);
2015 }
2016
2017 LayerTestResult<float,4> CompareNormalizationTest(
2018     armnn::IWorkloadFactory& workloadFactory,
2019     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2020     armnn::IWorkloadFactory& refWorkloadFactory,
2021     armnn::NormalizationAlgorithmChannel normChannel,
2022     armnn::NormalizationAlgorithmMethod normMethod)
2023 {
2024     return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
2025 }
2026
2027 LayerTestResult<float,2> CompareSoftmaxTest(
2028     armnn::IWorkloadFactory& workloadFactory,
2029     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2030     armnn::IWorkloadFactory& refWorkloadFactory,
2031     float beta)
2032 {
2033     return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
2034         workloadFactory, memoryManager, refWorkloadFactory, beta);
2035 }
2036
2037 LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
2038     armnn::IWorkloadFactory& workloadFactory,
2039     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2040     armnn::IWorkloadFactory& refWorkloadFactory,
2041     float beta)
2042 {
2043     return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
2044         workloadFactory, memoryManager, refWorkloadFactory, beta);
2045 }
2046
2047 std::vector<LayerTestResult<float,3>> SplitterTest(
2048     armnn::IWorkloadFactory& workloadFactory,
2049     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2050 {
2051     return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
2052 }
2053
2054 std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
2055     armnn::IWorkloadFactory& workloadFactory,
2056     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2057 {
2058     return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
2059 }
2060
2061 std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
2062     armnn::IWorkloadFactory& workloadFactory,
2063     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2064 {
2065     return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2066 }
2067
2068 LayerTestResult<float, 3> CopyViaSplitterTest(
2069     armnn::IWorkloadFactory& workloadFactory,
2070     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2071 {
2072     return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
2073 }
2074
2075 LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
2076     armnn::IWorkloadFactory& workloadFactory,
2077     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2078 {
2079     return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
2080 }
2081
2082 LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
2083         armnn::IWorkloadFactory& workloadFactory,
2084         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2085 {
2086     return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2087 }
2088
2089 #if defined(ARMCOMPUTEREF_ENABLED)
2090
2091 // At the moment the LSTM unit tests are only run when the reference backend is available.
2092
2093 void LstmUtilsZeroVectorTest()
2094 {
2095     armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
2096     boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2097             {2., 3., 3., 4.}));
2098
2099     boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2100             {0., 0., 0., 0.}));
2101
2102     return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
2103 }
2104
2105 void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
2106 {
2107     uint32_t batchSize = 2;
2108     uint32_t vecSize = 4;
2109     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2110     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2111             { 0.1f, 0.2f, 0.3f, 0.4f,      //batch 0
2112               0.9f, 1.0f, 1.1f, 1.2f }));  //batch 1
2113
2114     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2115             { -1.34164071f, -0.447213531f, 0.44721365f,  1.34164071f,      //batch 0
2116               -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f  }));  //batch 1
2117
2118     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2119             vecSize, batchSize, expectedOutput);
2120 }
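// The expected values above follow from per-batch mean/stddev normalisation:
// for batch 0 the mean is 0.25 and the stddev is sqrt(0.0125) ~= 0.1118, so
// (0.1 - 0.25) / 0.1118 ~= -1.34164. Batch 1 has the same deviations in exact
// arithmetic; its slightly different digits presumably come from 32-bit float
// rounding in the reference implementation.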
2121
2122 void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
2123 {
2124     uint32_t batchSize = 2;
2125     uint32_t vecSize = 4;
2126     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2127     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2128             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
2129               0.0f, 0.0f, 0.0f, 0.0f }));  //batch 1
2130
2131     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2132             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
2133               0.0f, 0.0f, 0.0f, 0.0f }));  //batch 1
2134
2135     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2136             vecSize, batchSize, expectedOutput);
2137 }
2138
2139 void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
2140 {
2141     uint32_t batchSize = 2;
2142     uint32_t vecSize = 4;
2143     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2144     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2145             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
2146               0.1f, 0.2f, 0.3f, 0.4f }));  //batch 1
2147
2148     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2149             {         0.0f,          0.0f,        0.0f,        0.0f,      //batch 0
2150               -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f }));  //batch 1
2151
2152     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2153             vecSize, batchSize, expectedOutput);
2154 }
2155
2156
2157 void LstmUtilsVectorBatchVectorCwiseProductTest()
2158 {
2159     uint32_t batchSize = 4;
2160     uint32_t vecSize = 29;
2161     armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2162     boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2163             {   1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f, 10.1f,
2164               11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
2165               21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,     0.0f}));
2166
2167     armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2168     boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2169             { /* batch 0 */
2170                 1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f,  10.1f,
2171               11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f,  20.2f,
2172               21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,   0.0f,
2173               /* batch 1 */
2174                 -1.1f,   -2.2f,   -3.3f,   -4.4f,   -5.5f,   -6.6f,   -7.7f,   -8.8f,   -9.9f, -10.1f,
2175               -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
2176               -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f,    0.0f,
2177               /* batch 2 */
2178                 1.1f,   -2.2f,   3.3f,   -4.4f,   5.5f,   -6.6f,   7.7f,   -8.8f,   9.9f, -10.1f,
2179               11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
2180               21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f,   0.0f,
2181               /* batch 3 */
2182                 -1.1f,   2.2f,   -3.3f,   4.4f,   -5.5f,   6.6f,   -7.7f,   8.8f,   -9.9f, 10.1f,
2183               -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
2184               -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f,    0.0f}));
2185
2186     // Expect output[b][i] = vector[i] * batchVector[b][i], e.g. 1.1f * 1.1f = 1.21f in batch 0.
2187     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2188             { /* batch 0 */
2189                  1.210000f,    4.840000f,   10.889999f,   19.360001f,   30.250000f,   43.559998f,
2190                 59.289997f,   77.440002f,   98.009995f,  102.010010f,  123.432091f,  146.894394f,
2191                172.396896f,  199.939606f,  229.522491f,  261.145599f,  294.808899f,  330.512421f,
2192                368.256134f,  408.040039f,  449.864075f,  493.728363f,  539.632874f,  587.577576f,
2193                637.562500f,  689.587585f,  743.652954f,  799.758423f,    0.000000f,
2194               /* batch 1 */
2195                 -1.210000f,   -4.840000f,  -10.889999f,  -19.360001f,  -30.250000f,  -43.559998f,
2196                -59.289997f,  -77.440002f,  -98.009995f, -102.010010f, -123.432091f, -146.894394f,
2197               -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
2198               -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
2199               -637.562500f, -689.587585f, -743.652954f, -799.758423f,    0.000000f,
2200               /* batch 2 */
2201                  1.210000f,   -4.840000f,  10.889999f,   -19.360001f,   30.250000f,  -43.559998f,
2202                 59.289997f,  -77.440002f,  98.009995f,  -102.010010f,  123.432091f, -146.894394f,
2203                172.396896f, -199.939606f, 229.522491f,  -261.145599f,  294.808899f, -330.512421f,
2204                368.256134f, -408.040039f, 449.864075f,  -493.728363f,  539.632874f, -587.577576f,
2205                637.562500f, -689.587585f, 743.652954f,  -799.758423f,    0.000000f,
2206               /* batch 3 */
2207                 -1.210000f,    4.840000f,  -10.889999f,   19.360001f,  -30.250000f,   43.559998f,
2208                -59.289997f,   77.440002f,  -98.009995f,  102.010010f, -123.432091f,  146.894394f,
2209               -172.396896f,  199.939606f, -229.522491f,  261.145599f, -294.808899f,  330.512421f,
2210               -368.256134f,  408.040039f, -449.864075f,  493.728363f, -539.632874f,  587.577576f,
2211               -637.562500f,  689.587585f, -743.652954f,  799.758423f,    0.000000f}));
2212
2213     return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
2214             vecSize, batchSize, expectedOutput);
2215 }
2216
2217
2218 void LstmUtilsVectorBatchVectorAddTest()
2219 {
2220     uint32_t batchSize = 2;
2221     uint32_t vecSize = 3;
2222     armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2223     boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2224             { 0.0f, -0.5f, 1.0f}));
2225
2226     armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2227     boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2228             { 1.0f, 2.0f, 3.0f,    //batch 0
2229               4.0f, 5.0f, 6.0f})); //batch 1
2230
2231     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
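    // Expect output[b][i] = batchVector[b][i] + vector[i],
    // e.g. 2.0f + (-0.5f) = 1.5f in batch 0.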
2232             { 1.0f, 1.5f, 4.0f,
2233               4.0f, 4.5f, 7.0f}));
2234
2235     return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
2236             vecSize, batchSize, expectedOutput);
2237 }
2238
2239 #endif
2240
2241 LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
2242     armnn::IWorkloadFactory& workloadFactory,
2243     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2244 {
2245     armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
2246     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2247             { 2., 3., 3., 4. }));
2248
2249     armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
2250     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2251             {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2252              -0.42734814f, -0.00478661f,  0.13455015f, -0.03560682f}));
2253     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
2254         workloadFactory, memoryManager, input, expectedOutput);
2255 }
2256
2257 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
2258     armnn::IWorkloadFactory& workloadFactory,
2259     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2260 {
2261     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2262     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2263             {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2264              0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
2265
2266     armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
2267     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2268             {-0.00396806f,  0.029352f,   -0.00279226f,  0.0159977f,  -0.00835576f,
2269              -0.0211779f,   0.0283512f,  -0.0114597f,   0.00907307f, -0.0244004f,
2270              -0.0152191f,  -0.0259063f,   0.00914318f,  0.00415118f,  0.017147f,
2271               0.0134203f,  -0.013869f,    0.0287268f,  -0.00334693f,  0.00733398f,
2272              -0.0287926f,  -0.0186926f,   0.0193662f,  -0.0115437f,   0.00422612f,
2273              -0.0345232f,   0.00223253f, -0.00957321f,  0.0210624f,   0.013331f,
2274               0.0150954f,   0.02168f}));
2275     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
2276         workloadFactory, memoryManager, input, expectedOutput);
2277 }
2278
2279 LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
2280     armnn::IWorkloadFactory& workloadFactory,
2281     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2282 {
2283     armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
2284     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2285             {2., 3., 3., 4.}));
2286
2287
2288     armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
2289     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2290             {{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
2291               -0.0185422f,   0.11281417f,  0.24466537f, -0.1826292f}}));
2292
2293     return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
2294         workloadFactory, memoryManager, input, expectedOutput);
2295 }
2296
2297
2298 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
2299         armnn::IWorkloadFactory& workloadFactory,
2300         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2301 {
2302     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2303     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2304             {0.7f, 0.8f, 0.1f, 0.2f, 0.3f,     //batch 0
2305              0.3f, 0.2f, 0.9f, 0.8f, 0.1f}));  //batch 1
2306
2307     armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
2308     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2309             {  0.0244077f,  0.128027f, -0.00170918f,    //batch 0
2310              -0.00692428f, 0.0848741f,    0.063445f})); //batch 1
2311     return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
2312             workloadFactory, memoryManager, input, expectedOutput);
2313 }
2314
2315
2316 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
2317     armnn::IWorkloadFactory& workloadFactory,
2318     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2319 {
2320     const float qScale = 1.0f;
2321     const int32_t qOffset = 0;
2322
2323     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2324     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2325
2326     armnn::TensorInfo inputDesc({2, 2}, datatype);
2327     boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2328             std::vector<float>{2., 3., 3., 4.}));
2329
2330     armnn::TensorInfo outputDesc({2, 4}, datatype);
2331     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2332             qOffset, std::vector<float>({{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
2333                                           -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f}})));
2334
2335     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2336         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2337
2338 }
2339
2340 LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
2341     armnn::IWorkloadFactory& workloadFactory,
2342     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2343 {
2344     const float qScale = 1.0f;
2345     const int32_t qOffset = 0;
2346
2347     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2348     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2349
2350     armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
2351     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2352             std::vector<float>({ 2., 3., 3., 4. })));
2353
2354     armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
2355     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2356             qOffset, std::vector<float>(
2357             {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2358              -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
2359
2360     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2361         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2362 }
2363
2364 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
2365     armnn::IWorkloadFactory& workloadFactory,
2366     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2367 {
2368     const float qScale = 2.0f;
2369     const int32_t qOffset = 0;
2370
2371     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2372     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2373
2374     armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
2375     boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2376             qOffset, std::vector<float>(
2377             {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2378              0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
2379
2380     armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
2381     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2382             qOffset, std::vector<float>(
2383             {-0.00396806f,  0.029352f,   -0.00279226f, 0.0159977f,  -0.00835576f,
2384              -0.0211779f,   0.0283512f,  -0.0114597f,  0.00907307f, -0.0244004f,
2385              -0.0152191f,  -0.0259063f,   0.00914318f, 0.00415118f,  0.017147f,
2386               0.0134203f,  -0.013869f,    0.0287268f, -0.00334693f,  0.00733398f, -0.0287926f,
2387              -0.0186926f,   0.0193662f,  -0.0115437f,  0.00422612f, -0.0345232f,
2388               0.00223253f, -0.00957321f,  0.0210624f,  0.013331f,    0.0150954f,   0.02168f})));
2389
2390     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
2391         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2392 }
2393
2394 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2395     armnn::IWorkloadFactory& workloadFactory,
2396     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2397 {
2398     const float qScale = 1.0f;
2399     const int32_t qOffset = 0;
2400
2401     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2402
2403     armnn::TensorInfo inputDesc({2, 2}, datatype);
2404     boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2405             qOffset, std::vector<float>{2., 3., 3., 4.}));
2406
2407     armnn::TensorInfo outputDesc({2, 4}, datatype);
2408     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2409             qOffset, std::vector<float>({{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
2410                                           -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f}})));
2411
2412     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2413         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2414 }
2415
2416 // QuantizedLstm
2417 LayerTestResult<uint8_t, 2> QuantizedLstmTest(
2418     armnn::IWorkloadFactory& workloadFactory,
2419     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2420 {
2421     armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
2422     boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
2423         {166, 179, 50, 150}));
2424
2425     armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
2426     boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
2427         {140, 151, 146, 112, 136, 156, 142, 112 }));
2428
2429     return QuantizedLstmTestImpl(workloadFactory, memoryManager, input, expectedOutput);
2430 }
2431
2432 LayerTestResult<float,3> ConcatTest(
2433     armnn::IWorkloadFactory& workloadFactory,
2434     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2435 {
2436     unsigned int outputWidth = 3;
2437     unsigned int outputHeight = 6;
2438     unsigned int outputChannels = 3;
2439
2440     unsigned int inputWidth1 = 3;
2441     unsigned int inputHeight1 = 6;
2442     unsigned int inputChannels1 = 2;
2443
2444     unsigned int inputWidth2 = 3;
2445     unsigned int inputHeight2 = 6;
2446     unsigned int inputChannels2 = 1;
2447
2448     // Define the tensor descriptors.
2449     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
2450     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
2451     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
2452
2453     LayerTestResult<float,3> ret(outputTensorInfo);
2454
2455     ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2456     {
2457             1.0f, 2.0f, 3.0f,
2458             4.0f, 5.0f, 6.0f,
2459             7.0f, 8.0f, 9.0f,
2460             10.0f, 11.0f, 12.0f,
2461             13.0f, 14.0f, 15.0f,
2462             16.0f, 17.0f, 18.0f,
2463
2464             19.0f, 20.0f, 21.0f,
2465             22.0f, 23.0f, 24.0f,
2466             25.0f, 26.0f, 27.0f,
2467             28.0f, 29.0f, 30.0f,
2468             31.0f, 32.0f, 33.0f,
2469             34.0f, 35.0f, 36.0f,
2470
2471             37.0f, 38.0f, 39.0f,
2472             40.0f, 41.0f, 42.0f,
2473             43.0f, 44.0f, 45.0f,
2474             46.0f, 47.0f, 48.0f,
2475             49.0f, 50.0f, 51.0f,
2476             52.0f, 53.0f, 54.0f,
2477         })
2478     );
2479
2480     auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2481         {
2482             1.0f, 2.0f, 3.0f,
2483             4.0f, 5.0f, 6.0f,
2484             7.0f, 8.0f, 9.0f,
2485             10.0f, 11.0f, 12.0f,
2486             13.0f, 14.0f, 15.0f,
2487             16.0f, 17.0f, 18.0f,
2488
2489             19.0f, 20.0f, 21.0f,
2490             22.0f, 23.0f, 24.0f,
2491             25.0f, 26.0f, 27.0f,
2492             28.0f, 29.0f, 30.0f,
2493             31.0f, 32.0f, 33.0f,
2494             34.0f, 35.0f, 36.0f,
2495         })
2496     );
2497
2498     auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2499         {
2500             37.0f, 38.0f, 39.0f,
2501             40.0f, 41.0f, 42.0f,
2502             43.0f, 44.0f, 45.0f,
2503             46.0f, 47.0f, 48.0f,
2504             49.0f, 50.0f, 51.0f,
2505             52.0f, 53.0f, 54.0f,
2506         })
2507     );
2508
2509     std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
2510     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
2511
2512     std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
2513     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
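    // With these origins, input1 fills output channels [0, 2) and input2 fills
    // channel 2, producing the 3-channel concatenated output expected above.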
2514
2515     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2516
2517     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2518
2519     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
2520         subTensorsSupported ?
2521             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2522             workloadFactory.CreateTensorHandle(inputTensorInfo1);
2523
2524     std::unique_ptr<armnn::ITensorHandle> inputHandle2  =
2525         subTensorsSupported ?
2526             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2527             workloadFactory.CreateTensorHandle(inputTensorInfo2);
2528
2529     armnn::ConcatQueueDescriptor data;
2530     armnn::WorkloadInfo info;
2531     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2532     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2533     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2534
2535     data.m_ViewOrigins.push_back(window1);
2536     data.m_ViewOrigins.push_back(window2);
2537
2538     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
2539
2540     inputHandle1->Allocate();
2541     inputHandle2->Allocate();
2542     outputHandle->Allocate();
2543
2544     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2545     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2546
2547     workload->PostAllocationConfigure();
2548     workload->Execute();
2549
2550     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2551
2552     return ret;
2553 }
2554
2555 LayerTestResult<float,4> AdditionTest(
2556     armnn::IWorkloadFactory& workloadFactory,
2557     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2558 {
2559     unsigned int batchSize = 2;
2560     unsigned int channels  = 2;
2561     unsigned int height    = 2;
2562     unsigned int width     = 3;
2563
2564     armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2565     armnn::TensorInfo outputTensorInfo;
2566
2567     unsigned int shape[] = {batchSize, channels, height, width};
2568
2569     inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2570     inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2571     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2572
2573
2574     auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
2575         {
2576             0.0f, 2.0f, 1.0f,
2577             0.2f, 1.0f, 2.0f,
2578
2579             1.0f, 2.0f, 1.0f,
2580             0.2f, 1.0f, 2.0f,
2581
2582             0.0f, 2.0f, 1.0f,
2583             4.2f, 1.0f, 2.0f,
2584
2585             0.0f, 0.0f, 1.0f,
2586             0.2f, 1.0f, 2.0f,
2587         }));
2588
2589     auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
2590         {
2591             1.0f, 2.0f, 1.0f,
2592             0.0f, 1.0f, 2.0f,
2593
2594             1.0f, 2.0f, -2.0f,
2595             0.2f, 1.0f, 2.0f,
2596
2597             0.0f, 2.0f, 1.0f,
2598             4.2f, 0.0f, -3.0f,
2599
2600             0.0f, 0.0f, 1.0f,
2601             0.7f, 1.0f, 5.0f,
2602         }));
2603
2604     LayerTestResult<float,4> ret(outputTensorInfo);
2605     ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
2606         {
2607             1.0f, 4.0f, 2.0f,
2608             0.2f, 2.0f, 4.0f,
2609
2610             2.0f, 4.0f, -1.0f,
2611             0.4f, 2.0f, 4.0f,
2612
2613             0.0f, 4.0f, 2.0f,
2614             8.4f, 1.0f, -1.0f,
2615
2616             0.0f, 0.0f, 2.0f,
2617             0.9f, 2.0f, 7.0f,
2618         }));
2619
2620     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2621     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2622     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2623
2624     armnn::AdditionQueueDescriptor data;
2625     armnn::WorkloadInfo info;
2626     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2627     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2628     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2629
2630     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2631
2632     inputHandle1->Allocate();
2633     inputHandle2->Allocate();
2634     outputHandle->Allocate();
2635
2636     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2637     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2638
2639     workload->PostAllocationConfigure();
2640     workload->Execute();
2641
2642     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2643
2644     return ret;
2645 }
2646
2647 LayerTestResult<float, 5> Addition5dTest(
2648     armnn::IWorkloadFactory& workloadFactory,
2649     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2650 {
2651     unsigned int depth     = 2;
2652     unsigned int batchSize = 2;
2653     unsigned int channels  = 2;
2654     unsigned int height    = 2;
2655     unsigned int width     = 3;
2656
2657     armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2658     armnn::TensorInfo outputTensorInfo;
2659
2660     unsigned int shape[] = {depth, batchSize, channels, height, width};
2661
2662     inputTensorInfo1 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
2663     inputTensorInfo2 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
2664     outputTensorInfo = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
2665
2666
2667     auto input1 = MakeTensor<float, 5>(inputTensorInfo1, std::vector<float>(
2668         {
2669             2.6f, 4.0f, 4.4f,  2.7f, 4.6f, 2.8f,
2670             2.3f, 1.9f, 3.4f,  2.9f, 2.2f, 4.5f,
2671
2672             2.8f, 1.9f, 2.3f,  2.6f, 4.7f, 3.5f,
2673             0.4f, 1.5f, 2.1f,  0.7f, 5.0f, 1.1f,
2674
2675
2676             1.0f, 2.7f, 0.0f,  0.6f, 0.8f, 0.9f,
2677             1.0f, 2.6f, 0.4f,  3.8f, 0.4f, 0.8f,
2678
2679             0.5f, 4.3f, 3.1f,  4.4f, 0.7f, 1.4f,
2680             0.4f, 4.4f, 0.7f,  0.6f, 4.7f, 1.2f,
2681
2682         }));
2683
2684     auto input2 = MakeTensor<float, 5>(inputTensorInfo2, std::vector<float>(
2685         {
2686             4.4f, 3.0f, 1.0f,  0.0f, 3.9f, 3.1f,
2687             1.7f, 2.9f, 1.3f,  0.4f, 0.4f, 4.3f,
2688
2689             4.5f, 0.2f, 2.2f,  4.1f, 3.9f, 3.0f,
2690             0.1f, 2.5f, 4.1f,  4.6f, 1.5f, 0.0f,
2691
2692
2693             0.5f, 4.9f, 2.5f,  1.5f, 3.4f, 4.5f,
2694             2.0f, 3.0f, 4.9f,  1.6f, 2.4f, 3.4f,
2695
2696             3.6f, 1.8f, 1.3f,  2.6f, 2.1f, 4.8f,
2697             2.0f, 4.3f, 4.0f,  0.2f, 0.6f, 4.4f,
2698         }));
2699
2700     LayerTestResult<float, 5> ret(outputTensorInfo);
2701     ret.outputExpected = MakeTensor<float, 5>(outputTensorInfo, std::vector<float>(
2702         {
2703             7.0f, 7.0f, 5.4f,  2.7f, 8.5f, 5.9f,
2704             4.0f, 4.8f, 4.7f,  3.3f, 2.6f, 8.8f,
2705
2706             7.3f, 2.1f, 4.5f,  6.7f, 8.6f, 6.5f,
2707             0.5f, 4.0f, 6.2f,  5.3f, 6.5f, 1.1f,
2708
2709
2710             1.5f, 7.6f, 2.5f,  2.1f, 4.2f, 5.4f,
2711             3.0f, 5.6f, 5.3f,  5.4f, 2.8f, 4.2f,
2712
2713             4.1f, 6.1f, 4.4f,  7.0f, 2.8f, 6.2f,
2714             2.4f, 8.7f, 4.7f,  0.8f, 5.3f, 5.6f,
2715         }));
2716
2717     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2718     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2719     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2720
2721     armnn::AdditionQueueDescriptor data;
2722     armnn::WorkloadInfo info;
2723     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2724     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2725     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2726
2727     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2728
2729     inputHandle1->Allocate();
2730     inputHandle2->Allocate();
2731     outputHandle->Allocate();
2732
2733     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0][0]);
2734     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0][0]);
2735
2736     workload->PostAllocationConfigure();
2737     workload->Execute();
2738
2739     CopyDataFromITensorHandle(&ret.output[0][0][0][0][0], outputHandle.get());
2740
2741     return ret;
2742 }
2743
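// Exercises per-dimension broadcasting: a {1, 3, 2, 1} tensor is added to a
// {1, 1, 2, 3} tensor, each operand being stretched along the dimensions where
// its extent is 1, producing a {1, 3, 2, 3} result.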
2744 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2745 LayerTestResult<T, 4> AdditionBroadcastTestImpl(
2746     armnn::IWorkloadFactory& workloadFactory,
2747     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2748     float qScale,
2749     int32_t qOffset)
2750 {
2751     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
2752     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
2753     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2754
2755     if (armnn::IsQuantizedType<T>())
2756     {
2757         inputTensorInfo1.SetQuantizationScale(qScale);
2758         inputTensorInfo1.SetQuantizationOffset(qOffset);
2759         inputTensorInfo2.SetQuantizationScale(qScale);
2760         inputTensorInfo2.SetQuantizationOffset(qOffset);
2761         outputTensorInfo.SetQuantizationScale(qScale);
2762         outputTensorInfo.SetQuantizationOffset(qOffset);
2763     }
2764
2765     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2766         {
2767             0.0f,
2768             1.0f,
2769
2770             2.0f,
2771             3.0f,
2772
2773             4.0f,
2774             5.0f,
2775         }));
2776
2777     auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2778         {
2779             0.5f, 1.5f, 2.5f,
2780             3.5f, 4.5f, 5.5f,
2781         }));
2782
2783     LayerTestResult<T,4> ret(outputTensorInfo);
2784     ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2785         {
2786             0.5f, 1.5f, 2.5f,
2787             4.5f, 5.5f, 6.5f,
2788
2789             2.5f, 3.5f, 4.5f,
2790             6.5f, 7.5f, 8.5f,
2791
2792             4.5f, 5.5f, 6.5f,
2793             8.5f, 9.5f, 10.5f,
2794         }));
2795
2796     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2797     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2798     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2799
2800     armnn::AdditionQueueDescriptor data;
2801     armnn::WorkloadInfo info;
2802     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2803     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2804     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2805
2806     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2807
2808     inputHandle1->Allocate();
2809     inputHandle2->Allocate();
2810     outputHandle->Allocate();
2811
2812     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2813     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2814
2815     workload->PostAllocationConfigure();
2816     workload->Execute();
2817
2818     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2819
2820     return ret;
2821 }
2822
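// Exercises scalar broadcasting: a single-element {1, 1, 1, 1} tensor is added
// to every element of the {1, 3, 2, 3} input.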
2823 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2824 LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
2825     armnn::IWorkloadFactory& workloadFactory,
2826     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2827     float qScale,
2828     int32_t qOffset)
2829 {
2830     armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2831     armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
2832     armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2833
2834     if (armnn::IsQuantizedType<T>())
2835     {
2836         inputTensorInfo1.SetQuantizationScale(qScale);
2837         inputTensorInfo1.SetQuantizationOffset(qOffset);
2838         inputTensorInfo2.SetQuantizationScale(qScale);
2839         inputTensorInfo2.SetQuantizationOffset(qOffset);
2840         outputTensorInfo.SetQuantizationScale(qScale);
2841         outputTensorInfo.SetQuantizationOffset(qOffset);
2842     }
2843
2844     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2845         {
2846              0.0f,  1.0f,  2.0f,
2847              3.0f,  4.0f,  5.0f,
2848              6.0f,  7.0f,  8.0f,
2849              9.0f, 10.0f, 11.0f,
2850             12.0f, 13.0f, 14.0f,
2851             15.0f, 16.0f, 17.0f,
2852         }));
2853
2854     auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2855         {
2856             0.5f,
2857         }));
2858
2859     LayerTestResult<T,4> ret(outputTensorInfo);
2860     ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2861         {
2862              0.5f,  1.5f,  2.5f,
2863              3.5f,  4.5f,  5.5f,
2864              6.5f,  7.5f,  8.5f,
2865              9.5f, 10.5f, 11.5f,
2866             12.5f, 13.5f, 14.5f,
2867             15.5f, 16.5f, 17.5f,
2868         }));
2869
2870     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2871     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2872     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2873
2874     armnn::AdditionQueueDescriptor data;
2875     armnn::WorkloadInfo info;
2876     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2877     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2878     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2879
2880     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2881
2882     inputHandle1->Allocate();
2883     inputHandle2->Allocate();
2884     outputHandle->Allocate();
2885
2886     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2887     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2888
2889     workload->PostAllocationConfigure();
2890     workload->Execute();
2891
2892     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2893
2894     return ret;
2895 }
2896
2897 LayerTestResult<float, 4> AdditionBroadcastTest(
2898     armnn::IWorkloadFactory& workloadFactory,
2899     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2900 {
2901     return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2902         workloadFactory, memoryManager, 0.0f, 0);
2903 }
2904
2905 LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2906     armnn::IWorkloadFactory& workloadFactory,
2907     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2908 {
2909     return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2910         workloadFactory, memoryManager, 2.f, 0);
2911 }
2912
2913 LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2914     armnn::IWorkloadFactory& workloadFactory,
2915     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2916 {
2917     return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2918         workloadFactory, memoryManager, 2.f, 0);
2919 }
2920
2921 LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2922     armnn::IWorkloadFactory& workloadFactory,
2923     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2924 {
2925     return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2926         workloadFactory, memoryManager, 0.0f, 0);
2927 }
2928
2929 LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2930     armnn::IWorkloadFactory& workloadFactory,
2931     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2932 {
2933     return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2934         workloadFactory, memoryManager, 0.1333333f, 128);
2935 }
2936
2937 LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2938     armnn::IWorkloadFactory& workloadFactory,
2939     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2940 {
2941     return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2942         workloadFactory, memoryManager, 0.1333333f, 0);
2943 }
2944
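// Feeds the same random inputs through the workload factory under test and
// through a reference factory, storing the reference result as outputExpected
// so that the two backends can be compared.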
2945 LayerTestResult<float,4> CompareAdditionTest(
2946     armnn::IWorkloadFactory& workloadFactory,
2947     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2948     armnn::IWorkloadFactory& refWorkloadFactory)
2949 {
2950     unsigned int batchSize = 4;
2951     unsigned int channels  = 1;
2952     unsigned int height    = 2;
2953     unsigned int width     = 3;
2954
2955     armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2956     armnn::TensorInfo outputTensorInfo;
2957
2958     unsigned int shape[] = {batchSize, channels, height, width};
2959
2960     inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2961     inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2962     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2963
2964     auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2965     auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2966
2967     LayerTestResult<float,4> ret(outputTensorInfo);
2968
2969     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2970     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2971     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2972
2973     std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2974     std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2975     std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2976
2977     armnn::AdditionQueueDescriptor data;
2978     armnn::WorkloadInfo info;
2979     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2980     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2981     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2982
2983     armnn::AdditionQueueDescriptor refData = data;
2984     armnn::WorkloadInfo refInfo = info;
2985     SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2986     SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2987     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2988
2989     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2990     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2991
2992     inputHandle1->Allocate();
2993     inputHandle2->Allocate();
2994     outputHandle->Allocate();
2995     inputHandle1Ref->Allocate();
2996     inputHandle2Ref->Allocate();
2997     outputHandleRef->Allocate();
2998
2999     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3000     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
3001     CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
3002     CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
3003
3004     workload->PostAllocationConfigure();
3005     workload->Execute();
3006     workloadRef->PostAllocationConfigure();
3007     workloadRef->Execute();
3008
3009     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3010     CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
3011
3012     return ret;
3013 }
3014
3015 namespace {
3016 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
3017 LayerTestResult<T, 4> DivisionTestHelper(
3018     armnn::IWorkloadFactory& workloadFactory,
3019     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3020     const unsigned int shape0[4],
3021     const std::vector<T>& values0,
3022     float scale0,
3023     int32_t offset0,
3024     const unsigned int shape1[4],
3025     const std::vector<T>& values1,
3026     float scale1,
3027     int32_t offset1,
3028     const unsigned int outShape[4],
3029     const std::vector<T>& outValues,
3030     float outScale,
3031     int32_t outOffset)
3032 {
3033     armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
3034     armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
3035     armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
3036
3037     inputTensorInfo0.SetQuantizationScale(scale0);
3038     inputTensorInfo0.SetQuantizationOffset(offset0);
3039
3040     inputTensorInfo1.SetQuantizationScale(scale1);
3041     inputTensorInfo1.SetQuantizationOffset(offset1);
3042
3043     outputTensorInfo.SetQuantizationScale(outScale);
3044     outputTensorInfo.SetQuantizationOffset(outOffset);
3045
3046     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
3047     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
3048
3049     LayerTestResult<T, 4> result(outputTensorInfo);
3050     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
3051
3052     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3053     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3054     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3055
3056     armnn::DivisionQueueDescriptor data;
3057     armnn::WorkloadInfo info;
3058     AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3059     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3060     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3061
3062     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
3063
3064     inputHandle0->Allocate();
3065     inputHandle1->Allocate();
3066     outputHandle->Allocate();
3067
3068     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3069     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3070
3071     workload->PostAllocationConfigure();
3072     workload->Execute();
3073
3074     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3075
3076     return result;
3077 }
3078 } // anonymous namespace
3079
3080 LayerTestResult<float,4> DivisionByZeroTest(
3081     armnn::IWorkloadFactory& workloadFactory,
3082     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3083 {
3084     const unsigned int width = 2;
3085     const unsigned int height = 2;
3086     const unsigned int channelCount = 2;
3087     const unsigned int batchSize = 2;
3088
3089     unsigned int shape[] = { batchSize, channelCount, height, width };
3090
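    // The expected outputs follow IEEE-754 semantics: a non-zero value divided
    // by +/-0 gives +/-infinity, 0 divided by +/-0 gives NaN, and the trailing
    // 5/5 elements give 1.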
3091     std::vector<float> input0({
3092                                 1.f,  1.f,  1.f,  1.f,  0.f, 0.f, 0.f, 0.f,
3093                                -1.f, -1.f, -1.f, -1.f,  5.f, 5.f, 5.f, 5.f });
3094
3095     std::vector<float> input1({
3096                                0.f, 0.f, -0.f, -0.f,  0.f, 0.f, -0.f, -0.f,
3097                                0.f, 0.f, -0.f, -0.f,  5.f, 5.f,  5.f,  5.f });
3098
3099     std::vector<float> output({
3100                                INFINITY, INFINITY, -INFINITY, -INFINITY,  NAN, NAN, -NAN, -NAN,
3101                                -INFINITY, -INFINITY, INFINITY, INFINITY,  1, 1, 1, 1 });
3102
3103     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3104                                                         memoryManager,
3105                                                         shape, input0, 1.0f, 0,
3106                                                         shape, input1, 1.0f, 0,
3107                                                         shape, output, 1.0f, 0);
3108 }
3109
3110 LayerTestResult<float,4> DivisionTest(
3111     armnn::IWorkloadFactory& workloadFactory,
3112     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3113 {
3114     const unsigned int width = 2;
3115     const unsigned int height = 2;
3116     const unsigned int channelCount = 2;
3117     const unsigned int batchSize = 2;
3118
3119     unsigned int shape[] = { batchSize, channelCount, height, width };
3120
3121     std::vector<float> input0({
3122                                       2,  2,  2,  2,    3,  3,  3,  3,
3123                                       4,  4,  4,  4,    5,  5,  5,  5 });
3124
3125     std::vector<float> input1({
3126                                       1,  1,  1,  1,    2,  2,  2,  2,
3127                                       4,  4,  4,  4,    4,  4,  4,  4 });
3128
3129     std::vector<float> output({
3130                                       2,  2,  2,  2,    1.5,  1.5,  1.5,  1.5,
3131                                       1, 1, 1, 1,  1.25, 1.25, 1.25, 1.25 });
3132
3133
3134     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3135                                                         memoryManager,
3136                                                         shape, input0, 1.0f, 0,
3137                                                         shape, input1, 1.0f, 0,
3138                                                         shape, output, 1.0f, 0);
3139 }
3140
3141 LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
3142     armnn::IWorkloadFactory& workloadFactory,
3143     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3144 {
3145     unsigned int shape0[] = { 1, 2, 2, 2 };
3146     std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3147
3148     unsigned int shape1[] = { 1, 1, 1, 1 };
3149     std::vector<float> input1({ 2 });
3150
3151     std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3152
3153
3154     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3155                                                         memoryManager,
3156                                                         shape0, input0, 1.0f, 0,
3157                                                         shape1, input1, 1.0f, 0,
3158                                                         shape0, output, 1.0f, 0);
3159 }
3160
3161 LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
3162     armnn::IWorkloadFactory& workloadFactory,
3163     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3164 {
3165     unsigned int shape0[] = { 1, 3, 3, 2 };
3166     std::vector<float> input0({
3167                                       1,   4,       3,  8,      5, 12,
3168                                       7,   16,      9, 20,     11, 24,
3169                                       13,  28,     15, 32,     17, 36});
3170
3171     unsigned int shape1[] = { 1, 1, 1, 2 };
3172     std::vector<float> input1({ 1, 2 });
3173
3174     std::vector<float> output({
3175                                       1,   2,      3,  4,      5,  6,
3176                                       7,   8,      9, 10,     11, 12,
3177                                       13, 14,     15, 16,     17, 18});
3178
3179     return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3180                                                         memoryManager,
3181                                                         shape0, input0, 1.0f, 0,
3182                                                         shape1, input1, 1.0f, 0,
3183                                                         shape0, output, 1.0f, 0);
3184 }
3185
3186 LayerTestResult<uint8_t,4> DivisionUint8Test(
3187     armnn::IWorkloadFactory& workloadFactory,
3188     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3189 {
3190     const unsigned int width = 2;
3191     const unsigned int height = 2;
3192     const unsigned int channelCount = 2;
3193     const unsigned int batchSize = 2;
3194
3195     unsigned int shape[] = { batchSize, channelCount, height, width };
3196
3197     std::vector<uint8_t> input0({2,  2,  2,  2,    3,  3,  3,  3,
3198                                  4,  4,  4,  4,    5,  5,  5,  5 });
3199
3200     std::vector<uint8_t> input1({1,  1,  1,  1,    2,  2,  2,  2,
3201                                  4,  4,  4,  4,    4,  4,  4,  4 });
3202
3203     std::vector<uint8_t> output({8,  8,  8,  8,    6,  6,  6,  6,
3204                                  4,  4,  4,  4,    5,  5,  5,  5});
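    // With an output quantization scale of 0.25 the stored values dequantize
    // to the true quotients: 8 -> 2.0, 6 -> 1.5, 4 -> 1.0 and 5 -> 1.25.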
3205
3206
3207     return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3208                                                                 memoryManager,
3209                                                                 shape, input0, 1.0f,  0,
3210                                                                 shape, input1, 1.0f,  0,
3211                                                                 shape, output, 0.25f, 0);
3212 }
3213
3214 LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
3215     armnn::IWorkloadFactory& workloadFactory,
3216     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3217 {
3218     unsigned int shape0[] = { 1, 2, 2, 2 };
3219     std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3220
3221     unsigned int shape1[] = { 1, 1, 1, 1 };
3222     std::vector<uint8_t> input1({ 2 });
3223
3224     std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3225
3226     return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3227                                                                 memoryManager,
3228                                                                 shape0, input0, 1.0f, 0,
3229                                                                 shape1, input1, 1.0f, 0,
3230                                                                 shape0, output, 1.0f, 0);
3231 }
3232
3233 LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
3234     armnn::IWorkloadFactory& workloadFactory,
3235     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3236 {
3237     unsigned int shape0[] = { 1, 3, 3, 2 };
3238     std::vector<uint8_t> input0({1,   4,     3,  8,      5,  12,
3239                                  7,   16,    9,  20,     11, 24,
3240                                  13,  28,    15, 32,     17, 36});
3241
3242     unsigned int shape1[] = { 1, 1, 1, 2 };
3243     std::vector<uint8_t> input1({ 1, 2 });
3244
3245     std::vector<uint8_t> output({1,   2,      3,  4,      5,  6,
3246                                  7,   8,      9, 10,     11, 12,
3247                                  13, 14,     15, 16,     17, 18});
3248
3249     return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3250                                                                 memoryManager,
3251                                                                 shape0, input0, 1.0f, 0,
3252                                                                 shape1, input1, 1.0f, 0,
3253                                                                 shape0, output, 1.0f, 0);
3254 }
3255
3256 LayerTestResult<int16_t,4> DivisionInt16Test(
3257     armnn::IWorkloadFactory& workloadFactory,
3258     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3259 {
3260     unsigned int shape[] = { 2, 2, 2, 2 };
3261
3262     std::vector<int16_t> input0({2,  2,  2,  2,    3,  3,  3,  3,
3263                                  4,  4,  4,  4,    5,  5,  5,  5 });
3264
3265     std::vector<int16_t> input1({1,  1,  1,  1,    2,  2,  2,  2,
3266                                  4,  4,  4,  4,    4,  4,  4,  4 });
3267
3268     std::vector<int16_t> output({8,  8,  8,  8,    6,  6,  6,  6,
3269                                  4,  4,  4,  4,    5,  5,  5,  5});
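    // With an output quantization scale of 0.25 the stored values dequantize
    // to the true quotients: 8 -> 2.0, 6 -> 1.5, 4 -> 1.0 and 5 -> 1.25.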
3270
3271
3272     return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3273                                                                 memoryManager,
3274                                                                 shape, input0, 1.0f,  0,
3275                                                                 shape, input1, 1.0f,  0,
3276                                                                 shape, output, 0.25f, 0);
3277 }
3278
3279 LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
3280     armnn::IWorkloadFactory& workloadFactory,
3281     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3282 {
3283     unsigned int shape0[] = { 1, 2, 2, 2 };
3284     std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3285
3286     unsigned int shape1[] = { 1, 1, 1, 1 };
3287     std::vector<int16_t> input1({ 2 });
3288
3289     std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3290
3291     return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3292                                                                 memoryManager,
3293                                                                 shape0, input0, 1.0f, 0,
3294                                                                 shape1, input1, 1.0f, 0,
3295                                                                 shape0, output, 1.0f, 0);
3296 }
3297
3298 LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
3299     armnn::IWorkloadFactory& workloadFactory,
3300     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3301 {
3302     unsigned int shape0[] = { 1, 3, 3, 2 };
3303     std::vector<int16_t> input0({1,   4,     3,  8,      5,  12,
3304                                  7,   16,    9,  20,     11, 24,
3305                                  13,  28,    15, 32,     17, 36});
3306
3307     unsigned int shape1[] = { 1, 1, 1, 2 };
3308     std::vector<int16_t> input1({ 1, 2 });
3309
3310     std::vector<int16_t> output({1,   2,      3,  4,      5,  6,
3311                                  7,   8,      9, 10,     11, 12,
3312                                  13, 14,     15, 16,     17, 18});
3313
3314     return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3315                                                                 memoryManager,
3316                                                                 shape0, input0, 1.0f, 0,
3317                                                                 shape1, input1, 1.0f, 0,
3318                                                                 shape0, output, 1.0f, 0);
3319 }
3320
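// Dispatches a queue descriptor to the matching IWorkloadFactory method. The
// primary template below simply calls itself and must never be selected;
// every descriptor type used by the elementwise tests provides an explicit
// specialization like the ones that follow.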
3321 template<typename DescriptorType>
3322 std::unique_ptr<armnn::IWorkload> CreateWorkload(
3323     const armnn::IWorkloadFactory& workloadFactory,
3324     const armnn::WorkloadInfo& info,
3325     const DescriptorType& descriptor)
3326 {
3327     return CreateWorkload(workloadFactory, info, descriptor);
3328 }
3329
3330 template<>
3331 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
3332     const armnn::IWorkloadFactory& workloadFactory,
3333     const armnn::WorkloadInfo& info,
3334     const armnn::MaximumQueueDescriptor& descriptor)
3335 {
3336     return workloadFactory.CreateMaximum(descriptor, info);
3337 }
3338
3339 template<>
3340 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
3341     const armnn::IWorkloadFactory& workloadFactory,
3342     const armnn::WorkloadInfo& info,
3343     const armnn::MinimumQueueDescriptor& descriptor)
3344 {
3345     return workloadFactory.CreateMinimum(descriptor, info);
3346 }
3347
3348 template<>
3349 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
3350         const armnn::IWorkloadFactory& workloadFactory,
3351         const armnn::WorkloadInfo& info,
3352         const armnn::EqualQueueDescriptor& descriptor)
3353 {
3354     return workloadFactory.CreateEqual(descriptor, info);
3355 }
3356
3357 template<>
3358 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
3359         const armnn::IWorkloadFactory& workloadFactory,
3360         const armnn::WorkloadInfo& info,
3361         const armnn::GreaterQueueDescriptor& descriptor)
3362 {
3363     return workloadFactory.CreateGreater(descriptor, info);
3364 }
3365
3366 namespace {
3367
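// Shared harness for the binary elementwise tests below (Equal, Greater,
// Maximum, Minimum): it builds the two input tensors, runs the workload
// created by CreateWorkload<Descriptor>, and captures the output alongside
// the expected values.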
3368 template <typename Descriptor,
3369           armnn::DataType ArmnnTypeInput,
3370           armnn::DataType ArmnnTypeOutput,
3371           typename TInput = armnn::ResolveType<ArmnnTypeInput>,
3372           typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
3373 LayerTestResult<TOutput, 4> ElementwiseTestHelper(
3374     armnn::IWorkloadFactory & workloadFactory,
3375     const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
3376     const unsigned int shape0[4], std::vector<TInput> values0,
3377     const unsigned int shape1[4], std::vector<TInput> values1,
3378     const unsigned int outShape[4], std::vector<TOutput> outValues,
3379     float qScale = 0.0f, int qOffset = 0)
3380 {
3381     const uint32_t dimensionCount = 4;
3382     armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
3383     armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
3384     armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
3385
3386     auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
3387     auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
3388
3389     if (armnn::IsQuantizedType<TInput>())
3390     {
3391         inputTensorInfo0.SetQuantizationScale(qScale);
3392         inputTensorInfo0.SetQuantizationOffset(qOffset);
3393
3394         inputTensorInfo1.SetQuantizationScale(qScale);
3395         inputTensorInfo1.SetQuantizationOffset(qOffset);
3396
3397         outputTensorInfo.SetQuantizationScale(qScale);
3398         outputTensorInfo.SetQuantizationOffset(qOffset);
3399     }
3400
3401     LayerTestResult<TOutput,4> ret(outputTensorInfo);
3402
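    // Boolean outputs hold 0/1 flags, so they are compared exactly rather
    // than with a floating-point tolerance.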
3403     if (ArmnnTypeOutput == armnn::DataType::Boolean)
3404     {
3405         ret.compareBoolean = true;
3406     }
3407
3408     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3409     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3410     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3411
3412     Descriptor data;
3413     armnn::WorkloadInfo info;
3414     AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3415     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3416     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3417     auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
3418
3419     inputHandle0->Allocate();
3420     inputHandle1->Allocate();
3421     outputHandle->Allocate();
3422
3423     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3424     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3425
3426     workload->PostAllocationConfigure();
3427     ExecuteWorkload(*workload, memoryManager);
3428
3429     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3430
3431     ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
3432     return ret;
3433 }
3434
3435 template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
3436 LayerTestResult<T, 4> ElementwiseTestHelper(
3437     armnn::IWorkloadFactory & workloadFactory,
3438     const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
3439     const unsigned int shape0[4], std::vector<T> values0,
3440     const unsigned int shape1[4], std::vector<T> values1,
3441     const unsigned int outShape[4], std::vector<T> outValues,
3442     float qScale = 0.0f, int qOffset = 0)
3443 {
3444     return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
3445         (workloadFactory,
3446          memoryManager,
3447          shape0,
3448          values0,
3449          shape1,
3450          values1,
3451          outShape,
3452          outValues,
3453          qScale,
3454          qOffset);
3455 }
3456 } // anonymous namespace
3457
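// A minimal usage sketch (hypothetical caller; the fixture names here are
// illustrative and not part of this file): each test below is invoked with a
// backend's workload factory and its result checked with CompareTensors from
// TensorHelpers.hpp, e.g.
//
//     LayerTestResult<uint8_t, 4> result = EqualSimpleTest(factory, memMgr);
//     BOOST_TEST(CompareTensors(result.output, result.outputExpected));
//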
3458 LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3459                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3460 {
3461     const unsigned int width = 2;
3462     const unsigned int height = 2;
3463     const unsigned int channelCount = 2;
3464     const unsigned int batchSize = 2;
3465
3466     unsigned int shape[] = { batchSize, channelCount, height, width };
3467
3468     std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
3469                                 3, 3, 3, 3,  4, 4, 4, 4 });
3470
3471     std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
3472                                 5, 5, 5, 5,  4, 4, 4, 4 });
3473
3474     std::vector<uint8_t> output({ 1, 1, 1, 1,  0, 0, 0, 0,
3475                                   0, 0, 0, 0,  1, 1, 1, 1 });
3476
3477     return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3478         workloadFactory,
3479         memoryManager,
3480         shape,
3481         input0,
3482         shape,
3483         input1,
3484         shape,
3485         output);
3486 }
3487
3488 LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
3489         armnn::IWorkloadFactory& workloadFactory,
3490         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3491 {
3492     unsigned int shape0[] = { 1, 2, 2, 2 };
3493     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3494
3495     unsigned int shape1[] = { 1, 1, 1, 1 };
3496     std::vector<float> input1({ 1 });
3497
3498     std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
3499
3500     return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3501         workloadFactory,
3502         memoryManager,
3503         shape0,
3504         input0,
3505         shape1,
3506         input1,
3507         shape0,
3508         output);
3509 }
3510
3511 LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
3512         armnn::IWorkloadFactory& workloadFactory,
3513         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3514 {
3515     const unsigned int shape0[] = { 1, 2, 2, 3 };
3516     const unsigned int shape1[] = { 1, 1, 1, 3 };
3517
3518     std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3519                                 7, 8, 9, 10, 11, 12 });
3520
3521     std::vector<float> input1({ 1, 2, 3});
3522
3523     std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
3524                                   0, 0, 0, 0, 0, 0 });
3525
3526     return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3527         workloadFactory,
3528         memoryManager,
3529         shape0,
3530         input0,
3531         shape1,
3532         input1,
3533         shape0,
3534         output);
3535 }
3536
3537 LayerTestResult<uint8_t, 4> EqualUint8Test(
3538         armnn::IWorkloadFactory& workloadFactory,
3539         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3540 {
3541     unsigned int shape[] = { 2, 2, 2, 2 };
3542
3543     // With a quantization scale of 1.0 and an offset of 0, the stored values below are also the dequantized values.
3544     std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3545                                   3, 3, 3, 3, 7, 7, 7, 7 });
3546
3547     std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3548                                   3, 3, 3, 3, 5, 5, 5, 5 });
3549
3550     std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3551                                   1, 1, 1, 1, 0, 0, 0, 0 });
3552
3553     return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3554                                  armnn::DataType::QuantisedAsymm8,
3555                                  armnn::DataType::Boolean>(
3556         workloadFactory,
3557         memoryManager,
3558         shape,
3559         input0,
3560         shape,
3561         input1,
3562         shape,
3563         output,
3564         1.0f,
3565         0);
3566 }
3567
3568 LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3569         armnn::IWorkloadFactory& workloadFactory,
3570         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3571 {
3572     const unsigned int shape0[] = { 1, 2, 2, 3 };
3573     const unsigned int shape1[] = { 1, 1, 1, 1 };
3574
3575     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3576                                   7, 8, 9, 10, 11, 12 });
3577
3578     std::vector<uint8_t> input1({ 1 });
3579
3580     std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3581                                   0, 0, 0, 0, 0, 0 });
3582
3583     return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3584                                  armnn::DataType::QuantisedAsymm8,
3585                                  armnn::DataType::Boolean>(
3586         workloadFactory,
3587         memoryManager,
3588         shape0,
3589         input0,
3590         shape1,
3591         input1,
3592         shape0,
3593         output,
3594         1.0f,
3595         0);
3596 }
3597
3598 LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3599         armnn::IWorkloadFactory& workloadFactory,
3600         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3601 {
3602     const unsigned int shape0[] = { 1, 2, 2, 3 };
3603     const unsigned int shape1[] = { 1, 1, 1, 3 };
3604
3605     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3606                                   7, 8, 9, 10, 11, 12 });
3607
3608     std::vector<uint8_t> input1({ 1, 1, 3});
3609
3610     std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3611                                   0, 0, 0, 0, 0, 0 });
3612
3613     return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3614                                  armnn::DataType::QuantisedAsymm8,
3615                                  armnn::DataType::Boolean>(
3616         workloadFactory,
3617         memoryManager,
3618         shape0,
3619         input0,
3620         shape1,
3621         input1,
3622         shape0,
3623         output,
3624         1.0f,
3625         0);
3626 }
3627
3628 LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3629                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3630 {
3631     const unsigned int width = 2;
3632     const unsigned int height = 2;
3633     const unsigned int channelCount = 2;
3634     const unsigned int batchSize = 2;
3635
3636     unsigned int shape[] = { batchSize, channelCount, height, width };
3637
3638     std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
3639                                 3, 3, 3, 3,  4, 4, 4, 4 });
3640
3641     std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
3642                                 5, 5, 5, 5,  4, 4, 4, 4 });
3643
3644     std::vector<uint8_t> output({ 0, 0, 0, 0,  1, 1, 1, 1,
3645                                   0, 0, 0, 0,  0, 0, 0, 0 });
3646
3647     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3648         workloadFactory,
3649         memoryManager,
3650         shape,
3651         input0,
3652         shape,
3653         input1,
3654         shape,
3655         output);
3656 }
3657
3658 LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
3659         armnn::IWorkloadFactory& workloadFactory,
3660         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3661 {
3662     unsigned int shape0[] = { 1, 2, 2, 2 };
3663     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3664
3665     unsigned int shape1[] = { 1, 1, 1, 1 };
3666     std::vector<float> input1({ 1 });
3667
3668     std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
3669
3670     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3671         workloadFactory,
3672         memoryManager,
3673         shape0,
3674         input0,
3675         shape1,
3676         input1,
3677         shape0,
3678         output);
3679 }
3680
3681 LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
3682         armnn::IWorkloadFactory& workloadFactory,
3683         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3684 {
3685     const unsigned int shape0[] = { 1, 2, 2, 3 };
3686     const unsigned int shape1[] = { 1, 1, 1, 3 };
3687
3688     std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3689                                 7, 8, 9, 10, 11, 12 });
3690
3691     std::vector<float> input1({ 1, 3, 2});
3692
3693     std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3694                                   1, 1, 1, 1, 1, 1 });
3695
3696     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
3697         workloadFactory,
3698         memoryManager,
3699         shape0,
3700         input0,
3701         shape1,
3702         input1,
3703         shape0,
3704         output);
3705 }
3706
3707 LayerTestResult<uint8_t, 4> GreaterUint8Test(
3708         armnn::IWorkloadFactory& workloadFactory,
3709         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3710 {
3711     unsigned int shape[] = { 2, 2, 2, 2 };
3712
3713     // With a quantization scale of 1.0 and an offset of 0, the stored values below are also the dequantized values.
3714     std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3715                                   3, 3, 3, 3, 5, 5, 5, 5 });
3716
3717     std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3718                                   2, 2, 2, 2, 5, 5, 5, 5 });
3719
3720     std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3721                                   1, 1, 1, 1, 0, 0, 0, 0 });
3722
3723     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3724                                  armnn::DataType::QuantisedAsymm8,
3725                                  armnn::DataType::Boolean>(
3726         workloadFactory,
3727         memoryManager,
3728         shape,
3729         input0,
3730         shape,
3731         input1,
3732         shape,
3733         output,
3734         1.0f,
3735         0);
3736 }
3737
3738 LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3739         armnn::IWorkloadFactory& workloadFactory,
3740         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3741 {
3742     const unsigned int shape0[] = { 1, 2, 2, 3 };
3743     const unsigned int shape1[] = { 1, 1, 1, 1 };
3744
3745     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3746                                   7, 8, 9, 10, 11, 12 });
3747
3748     std::vector<uint8_t> input1({ 1 });
3749
3750     std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3751                                   1, 1, 1, 1, 1, 1 });
3752
3753     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3754                                  armnn::DataType::QuantisedAsymm8,
3755                                  armnn::DataType::Boolean>(
3756         workloadFactory,
3757         memoryManager,
3758         shape0,
3759         input0,
3760         shape1,
3761         input1,
3762         shape0,
3763         output,
3764         1.0f,
3765         0);
3766 }
3767
3768 LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3769         armnn::IWorkloadFactory& workloadFactory,
3770         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3771 {
3772     const unsigned int shape0[] = { 1, 2, 2, 3 };
3773     const unsigned int shape1[] = { 1, 1, 1, 3 };
3774
3775     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3776                                   7, 8, 9, 10, 11, 12 });
3777
3778     std::vector<uint8_t> input1({ 1, 1, 3});
3779
3780     std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3781                                   1, 1, 1, 1, 1, 1 });
3782
3783     return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3784                                  armnn::DataType::QuantisedAsymm8,
3785                                  armnn::DataType::Boolean>(
3786         workloadFactory,
3787         memoryManager,
3788         shape0,
3789         input0,
3790         shape1,
3791         input1,
3792         shape0,
3793         output,
3794         1.0f,
3795         0);
3796 }
3797
3798 LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3799                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3800 {
3801     const unsigned int width = 2;
3802     const unsigned int height = 2;
3803     const unsigned int channelCount = 2;
3804     const unsigned int batchSize = 2;
3805
3806     unsigned int shape[] = { batchSize, channelCount, height, width };
3807
3808     std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
3809                                 3, 3, 3, 3,  4, 4, 4, 4 });
3810
3811     std::vector<float> input1({ 2, 2, 2, 2,  3, 3, 3, 3,
3812                                 4, 4, 4, 4,  5, 5, 5, 5 });
3813
3814     std::vector<float> output({ 2, 2, 2, 2,  5, 5, 5, 5,
3815                                 4, 4, 4, 4,  5, 5, 5, 5 });
3816
3817     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3818         workloadFactory,
3819         memoryManager,
3820         shape,
3821         input0,
3822         shape,
3823         input1,
3824         shape,
3825         output);
3826 }
3827
3828 LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3829         armnn::IWorkloadFactory& workloadFactory,
3830         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3831 {
3832     unsigned int shape0[] = { 1, 2, 2, 2 };
3833     std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3834
3835     unsigned int shape1[] = { 1, 1, 1, 1 };
3836     std::vector<float> input1({ 2 });
3837
3838     std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3839
3840     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3841         workloadFactory,
3842         memoryManager,
3843         shape0,
3844         input0,
3845         shape1,
3846         input1,
3847         shape0,
3848         output);
3849 }
3850
3851 LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3852         armnn::IWorkloadFactory& workloadFactory,
3853         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3854 {
3855     const unsigned int shape0[] = { 1, 2, 2, 3 };
3856     const unsigned int shape1[] = { 1, 1, 1, 3 };
3857
3858     std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3859                                 7, 8, 9, 10, 11, 12 });
3860
3861     std::vector<float> input1({ 1, 2, 3});
3862
3863     std::vector<float> output({ 1, 2, 3, 4, 5, 6,
3864                                 7, 8, 9, 10, 11, 12 });
3865
3866     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3867         workloadFactory,
3868         memoryManager,
3869         shape0,
3870         input0,
3871         shape1,
3872         input1,
3873         shape0,
3874         output);
3875 }
3876
3877 LayerTestResult<uint8_t, 4> MaximumUint8Test(
3878         armnn::IWorkloadFactory& workloadFactory,
3879         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3880 {
3881     unsigned int shape[] = { 2, 2, 2, 2 };
3882
3883     // With a quantization scale of 1.0 and an offset of 0, the stored values below are also the dequantized values.
3884     std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3885                                   3, 3, 3, 3, 4, 4, 4, 4 });
3886
3887     std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3888                                   4, 4, 4, 4, 5, 5, 5, 5 });
3889
3890     std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3891                                   4, 4, 4, 4, 5, 5, 5, 5 });
3892
3893     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3894         workloadFactory,
3895         memoryManager,
3896         shape,
3897         input0,
3898         shape,
3899         input1,
3900         shape,
3901         output,
3902         1.0f,
3903         0);
3904 }
3905
3906 LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3907         armnn::IWorkloadFactory& workloadFactory,
3908         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3909 {
3910     const unsigned int shape0[] = { 1, 2, 2, 3 };
3911     const unsigned int shape1[] = { 1, 1, 1, 1 };
3912
3913     std::vector<uint8_t> input0({ 1, 2, 3, 4,  5, 6,
3914                                   7, 8, 9, 10, 11, 12 });
3915
3916     std::vector<uint8_t> input1({2});
3917
3918     std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3919                                   7, 8, 9, 10, 11, 12 });
3920
3921     return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3922         workloadFactory,
3923         memoryManager,
3924         shape0,
3925         input0,
3926         shape1,
3927         input1,
3928         shape0,
3929         output,
3930         1.0f,
3931         0);
3932 }
3933
3934 LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3935         armnn::IWorkloadFactory& workloadFactory,
3936         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3937 {
3938     const unsigned int shape0[] = { 1, 2, 2, 3 };
3939     const unsigned int shape1[] = { 1, 1, 1, 3 };
3940
3941     std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3942                                   7, 8, 9, 10, 11, 12 });
3943
3944     std::vector<uint8_t> input1({ 1, 10, 3});
3945
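    // input1 has shape { 1, 1, 1, 3 }, so its three values are broadcast
    // (tiled) across the four spatial positions of input0 before the
    // element-wise max is taken.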
    std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
                                  7, 10, 9, 10, 11, 12 });

    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        1.0f,
        0);
}

LayerTestResult<int16_t, 4> MaximumInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int shape[] = { 2, 2, 2, 2 };

    std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
                                  3, 3, 3, 3, 4, 4, 4, 4 });

    std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
                                  4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
                                  4, 4, 4, 4, 5, 5, 5, 5 });

    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        shape,
        input1,
        shape,
        output,
        1.0f,
        0);
}

LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<int16_t> input0({ 1, 2, 3, 4,  5,  6,
                                  7, 8, 9, 10, 11, 12 });

    std::vector<int16_t> input1({ 2 });

    std::vector<int16_t> output({ 2, 2, 3, 4,  5,  6,
                                  7, 8, 9, 10, 11, 12 });

    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        1.0f,
        0);
}

LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
                                  7, 8, 9, 10, 11, 12 });

    std::vector<int16_t> input1({ 1, 10, 3 });

    std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
                                  7, 10, 9, 10, 11, 12 });

    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        1.0f,
        0);
}

LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2 });

    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output);
}

LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 5 });

    std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5 });

    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output);
}

LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
                                  7, 1, 2, 3, 4, 5 });

    std::vector<uint8_t> input1({ 1, 2, 3 });

    std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
                                  1, 1, 2, 1, 2, 3 });

    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        1.0f,
        0);
}

LayerTestResult<int16_t, 4> MinimumInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int shape[] = { 2, 2, 2, 2 };

    std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
                                  3, 3, 3, 3, 4, 4, 4, 4 });

    std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
                                  4, 4, 4, 4, 5, 5, 5, 5 });

    std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
                                  3, 3, 3, 3, 4, 4, 4, 4 });

    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        shape,
        input1,
        shape,
        output,
        1.0f,
        0);
}

LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<int16_t> input0({ 1, 2, 3, 4,  5,  6,
                                  7, 8, 9, 10, 11, 12 });

    std::vector<int16_t> input1({ 2 });

    std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
                                  2, 2, 2, 2, 2, 2 });

    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        1.0f,
        0);
}

LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
                                  7, 8, 9, 10, 11, 12 });

    std::vector<int16_t> input1({ 1, 10, 3 });

    std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
                                  1, 8, 3, 1, 10, 3 });

    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output,
        1.0f,
        0);
}

namespace {
template<std::size_t NumDims>
LayerTestResult<float, NumDims> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims],
    const std::vector<float>& values0,
    const unsigned int shape1[NumDims],
    const std::vector<float>& values1,
    const unsigned int outShape[NumDims],
    const std::vector<float>& outValues)
{
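    // Generic element-wise multiplication check: build tensor infos from the
    // given shapes, run a Multiplication workload through the factory, and
    // record both the computed and the expected outputs for comparison.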
    armnn::TensorInfo inputTensorInfo0{NumDims, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{NumDims, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{NumDims, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, NumDims>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, NumDims>(inputTensorInfo1, values1);

    LayerTestResult<float, NumDims> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
    CopyDataToITensorHandle(inputHandle1.get(), input1.origin());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());

    ret.outputExpected = MakeTensor<float, NumDims>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace

LayerTestResult<float, 4> MultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1,  1,  1,  1,    2,  2,  2,  2,
        3,  3,  3,  3,    4,  4,  4,  4 });

    std::vector<float> input1({
        2,  2,  2,  2,    3,  3,  3,  3,
        4,  4,  4,  4,    5,  5,  5,  5 });

    std::vector<float> output({
        2,  2,  2,  2,    6,  6,  6,  6,
        12, 12, 12, 12,  20, 20, 20, 20 });

    return MultiplicationTestHelper<4>(workloadFactory,
                                       memoryManager,
                                       shape,
                                       input0,
                                       shape,
                                       input1,
                                       shape,
                                       output);
}

LayerTestResult<float, 5> Multiplication5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int width = 3;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;
    const unsigned int depth = 2;

    unsigned int shape[] = { depth, batchSize, channelCount, height, width };

    std::vector<float> input0({
        1.80f, 0.20f, 2.30f,  1.30f, 2.10f, 1.00f,
        2.60f, 0.60f, 2.10f,  2.30f, 2.30f, 2.00f,

        2.50f, 1.00f, 2.90f,  3.10f, 1.50f, 2.40f,
        2.80f, 1.10f, 1.00f,  3.20f, 1.00f, 2.30f,


        0.30f, 2.20f, 1.00f,  0.20f, 1.60f, 1.40f,
        0.80f, 3.20f, 0.10f,  0.10f, 3.10f, 2.10f,

        1.50f, 2.40f, 1.40f,  0.70f, 2.40f, 1.40f,
        1.60f, 1.20f, 1.90f,  0.80f, 0.00f, 0.10f,
    });

    std::vector<float> input1({
        0.70f, 1.00f, 2.90f,  2.20f, 3.10f, 2.80f,
        1.80f, 2.00f, 0.50f,  2.30f, 1.20f, 2.70f,

        2.40f, 0.20f, 3.20f,  1.60f, 0.20f, 2.50f,
        2.30f, 0.70f, 2.70f,  1.80f, 2.90f, 2.70f,


        3.20f, 3.20f, 0.70f,  1.90f, 2.70f, 2.50f,
        2.40f, 0.90f, 2.30f,  1.80f, 2.50f, 2.00f,

        1.60f, 2.20f, 1.60f,  2.00f, 0.30f, 3.20f,
        0.40f, 3.00f, 2.60f,  0.30f, 0.00f, 2.50f,
    });

    std::vector<float> output({
        1.26f, 0.20f, 6.67f,  2.86f, 6.51f, 2.80f,
        4.68f, 1.20f, 1.05f,  5.29f, 2.76f, 5.40f,

        6.00f, 0.20f, 9.28f,  4.96f, 0.30f, 6.00f,
        6.44f, 0.77f, 2.70f,  5.76f, 2.90f, 6.21f,


        0.96f, 7.04f, 0.70f,  0.38f, 4.32f, 3.50f,
        1.92f, 2.88f, 0.23f,  0.18f, 7.75f, 4.20f,

        2.40f, 5.28f, 2.24f,  1.40f, 0.72f, 4.48f,
        0.64f, 3.60f, 4.94f,  0.24f, 0.00f, 0.25f,
    });

    return MultiplicationTestHelper<5>(workloadFactory,
                                       memoryManager,
                                       shape,
                                       input0,
                                       shape,
                                       input1,
                                       shape,
                                       output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int shape0[] = { 1, 2, 2, 2 };
    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8 });

    unsigned int shape1[] = { 1, 1, 1, 1 };
    std::vector<float> input1({ 2 });

    std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16 });

    return MultiplicationTestHelper<4>(workloadFactory,
                                       memoryManager,
                                       shape0,
                                       input0,
                                       shape1,
                                       input1,
                                       shape0,
                                       output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int shape0[] = { 1, 3, 3, 2 };
    std::vector<float> input0({
        1,   2,      3,  4,      5,  6,
        7,   8,      9, 10,     11, 12,
        13, 14,     15, 16,     17, 18 });

    unsigned int shape1[] = { 1, 1, 1, 2 };
    std::vector<float> input1({ 1, 2 });

    std::vector<float> output({
        1,   4,      3,  8,      5, 12,
        7,  16,      9, 20,     11, 24,
        13, 28,     15, 32,     17, 36 });

    return MultiplicationTestHelper<4>(workloadFactory,
                                       memoryManager,
                                       shape0,
                                       input0,
                                       shape1,
                                       input1,
                                       shape0,
                                       output);
}

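// The Compare* tests run the same workload twice: once through the backend
// under test and once through refWorkloadFactory, storing the first result
// as the output and the second as the expected output so that the caller
// can compare the two backends against each other.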
LayerTestResult<float, 4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float, 4> comparisonResult(outputTensorInfo);

    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width     = 2;
    const unsigned int height    = 3;
    const unsigned int channels  = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[]       = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
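    // The batch-norm parameters are passed as ScopedCpuTensorHandles, which
    // own their CPU-side storage for as long as the workload needs it.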
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean             = &meanTensor;
    data.m_Variance         = &varianceTensor;
    data.m_Beta             = &betaTensor;
    data.m_Gamma            = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo& inputTensorInfo,
        const T* inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
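    // inputTensorInfo is an in/out parameter: on return it describes the
    // permuted tensor, so callers can chain further operations on the result.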
    inputTensorInfo = outputTensorInfo;
}

armnn::OriginsDescriptor CreateDescriptorForConcatenation(
        const std::vector<armnn::TensorInfo>& inputTensorInfos,
        unsigned int concatDim)
{
    std::vector<armnn::TensorShape> shapes;
    shapes.reserve(inputTensorInfos.size());
    for (const armnn::TensorInfo& it: inputTensorInfos)
    {
        shapes.push_back(it.GetShape());
    }

    return armnn::CreateDescriptorForConcatenation(shapes.begin(),
                                                   shapes.end(),
                                                   concatDim);
}
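// A minimal usage sketch (with assumed shapes): concatenating two { 2, 3 }
// tensors along dimension 0 yields a descriptor with one view per input,
// whose origins are offset along the concat axis, e.g.
//
//     std::vector<armnn::TensorInfo> infos(
//         2, armnn::TensorInfo({ 2, 3 }, armnn::DataType::Float32));
//     armnn::OriginsDescriptor desc = CreateDescriptorForConcatenation(infos, 0);
//     // desc.GetNumViews() == 2; view 0 starts at { 0, 0 }, view 1 at { 2, 0 }.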

//
// Concatenation is only supported for the N and C dimensions for NCHW and
// for the innermost dimension. For tensors with fewer than 4 dimensions we
// need to make sure that the concat dimension is at least the 3rd slowest
// iterating one, or the innermost dimension.
//

bool NeedPermuteForConcat(
        const std::vector<armnn::TensorInfo>& inputTensorInfos,
        unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto&& tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
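// Worked examples of the condition above:
//   - 2d tensors, any concatDim:  nDimensions < 3, so a permute is needed.
//   - 3d tensors, concatDim == 1: (3 - 1) == 2, which is < 3 and != 1,
//                                 so a permute is needed.
//   - 3d tensors, concatDim == 0: (3 - 0) == 3, not < 3, so no permute.
//   - 3d tensors, concatDim == 2: (3 - 2) == 1 (innermost), so no permute.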

armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape& inputShape)
{
    unsigned int numDims = inputShape.GetNumDimensions();
    if (numDims >= 3)
    {
        // Nothing to do if the inputShape has at least 3 dimensions.
        return inputShape;
    }

    std::vector<unsigned int> newDims(size_t(3), 1u);
    unsigned int expandedBy = 3 - numDims;
    for (unsigned int i = 0; i < numDims; ++i)
    {
        newDims[expandedBy+i] = inputShape[i];
    }
    return armnn::TensorShape(3u, &newDims[0]);
}
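// For example, { 5 } expands to { 1, 1, 5 } and { 2, 3 } expands to
// { 1, 2, 3 }; shapes with three or more dimensions are returned unchanged.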

void Generate3dPermuteVectorForConcat(
        unsigned int numDimensions,
        unsigned int& concatDim,
        std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutations)
{
    BOOST_ASSERT_MSG(numDimensions <= 3,
       "Only dimensions 1, 2 and 3 are supported by this helper");
    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({1, 2, 0});
        armnn::PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({2, 0, 1});
        armnn::PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        BOOST_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}
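// For example, concatenating 2d tensors along dimension 1: expandedBy == 1,
// so the expanded concat axis is 2 (the innermost). The forward permutation
// {1, 2, 0} moves that axis to position 0, concatDim becomes 0, and the
// reverse permutation {2, 0, 1} undoes the move after the concatenation.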

//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
template <typename T>
void PermuteInputsForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        std::vector<armnn::TensorInfo>& inputTensorInfos,
        std::vector<T*>& inputData,
        std::vector<std::vector<T>>& inputDataStorage,
        armnn::PermutationVector& permuteVector,
        unsigned int& concatDim,
        armnn::TensorInfo& outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto&& tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}

//
// This is the pair of PermuteInputsForConcat(...) which permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
template <typename T>
void PermuteOutputForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::TensorInfo& tensorInfo,
        const armnn::PermutationVector& permuteVector,
        std::unique_ptr<armnn::ITensorHandle>&& inputDataHandle,
        T* data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}

template <typename T>
void Concatenate(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T*> inputsOrig,
    const armnn::TensorInfo& outputTensorInfoOrig,
    T* output,
    unsigned int concatDim,
    bool useSubtensor)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Saves a copy of the parameters which we might need to change.
    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T*> inputs             = inputsOrig;
    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;

    armnn::PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    armnn::WorkloadInfo workloadInfo;

    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ConcatQueueDescriptor queueDescriptor;
    armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
    queueDescriptor.m_Parameters = viewsDescriptor;

    if (useSubtensor)
    {
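        // Each view origin records where an input begins inside the output
        // tensor. When the factory supports sub-tensors, the input handles
        // created below alias the output tensor's memory at those origins
        // instead of owning separate copies.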
4872         queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4873         for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4874         {
4875             queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4876                 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4877         }
4878
4879         outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4880
4881         const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4882         for (unsigned int i = 0; i < inputCount; ++i)
4883         {
4884             const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4885             std::unique_ptr<armnn::ITensorHandle> inputHandle =
4886                 subTensorsSupported ?
4887                     workloadFactory.CreateSubTensorHandle(*outputHandle,
4888                                                           inputTensorInfo.GetShape(),
4889                                                           queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4890                     workloadFactory.CreateTensorHandle(inputTensorInfo);
4891
4892             inputHandles.emplace_back(std::move(inputHandle));
4893         }
4894
4895     }
4896     else
4897     {
4898         for (unsigned int i = 0; i < inputCount; ++i)
4899         {
4900             std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4901             inputHandles.emplace_back(std::move(inputHandle));
4902         }
4903     }
4904
4905     for (unsigned int i = 0; i < inputCount; ++i)
4906     {
4907         AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
4908     }
4909
4910     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4911
4912     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
4913
4914     for (auto& inputHandle : inputHandles)
4915     {
4916         inputHandle->Allocate();
4917     }
4918
4919     outputHandle->Allocate();
4920
4921     unsigned int nextInputId = 0;
4922     for (auto& inputHandle : inputHandles)
4923     {
4924         CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4925         ++nextInputId;
4926     }
4927
4928     workload->PostAllocationConfigure();
4929     workload->Execute();
4930
4931     if (needPermuteForConcat)
4932     {
4933         PermuteOutputForConcat<T>(workloadFactory,
4934                                   memoryManager,
4935                                   outputTensorInfo,
4936                                   permuteVector,
4937                                   std::move(outputHandle),
4938                                   output);
4939     }
4940     else
4941     {
4942         CopyDataFromITensorHandle(output, outputHandle.get());
4943     }
4944 }
4945
4946 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4947 LayerTestResult<T, 1> Concatenation1dTestImpl(
4948     armnn::IWorkloadFactory& workloadFactory,
4949     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4950     float qScale,
4951     int32_t qOffset)
4952 {
4953     armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
4954
4955     auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4956     auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4957     auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4958
4959     armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
4960
4961     LayerTestResult<T, 1> result(outputTensorInfo);
4962
4963     std::vector<T> output;
4964     output.resize(outputTensorInfo.GetNumElements());
4965     Concatenate<T>(workloadFactory, memoryManager,
4966                    { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4967                    { input0.data(), input1.data(), input2.data() },
4968                    outputTensorInfo,
4969                    output.data(),
4970                    0,
4971                    true);
4972
4973     result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4974     result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4975         1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4976     }));
4977
4978     return result;
4979 }
4980
4981 LayerTestResult<float, 1> Concatenation1dTest(
4982     armnn::IWorkloadFactory& workloadFactory,
4983     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4984 {
4985     return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
4986 }
4987
4988 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
4989 LayerTestResult<T, 2> Concatenation2dTestImpl(
4990     armnn::IWorkloadFactory& workloadFactory,
4991     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4992     const armnn::TensorInfo& outputTensorInfo,
4993     unsigned int dimension,
4994     const float qScale,
4995     const int32_t qOffset)
4996 {
4997     armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
4998
4999     auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5000         // Batch 0
5001         1.0f, 2.0f, 3.0f,
5002
5003         // Batch 1
5004         10.0f, 11.0f, 12.0f,
5005     }));
5006
5007     auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5008         // Batch 0
5009         4.0f, 5.0f, 6.0f,
5010
5011         // Batch 1
5012         13.0f, 14.0f, 15.0f,
5013     }));
5014
5015     auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5016         // Batch 0
5017         7.0f, 8.0f, 9.0f,
5018
5019         // Batch 1
5020         16.0f, 17.0f, 18.0f,
5021     }));
5022
5023     LayerTestResult<T, 2> result(outputTensorInfo);
5024
5025     std::vector<T> output;
5026     output.resize(outputTensorInfo.GetNumElements());
5027     Concatenate<T>(workloadFactory, memoryManager,
5028                    { inputTensorInfo, inputTensorInfo, inputTensorInfo },
5029                    { input0.data(), input1.data(), input2.data() },
5030                    outputTensorInfo,
5031                    output.data(),
5032                    dimension,
5033                    true);
5034
5035     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
5036     return result;
5037 }
5038
5039 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5040 LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
5041     armnn::IWorkloadFactory& workloadFactory,
5042     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5043     float qScale,
5044     int32_t qOffset)
5045 {
5046     armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
5047
5048     LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
5049         workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
5050
5051     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5052         // Batch 0
5053         1.0f, 2.0f, 3.0f,
5054
5055         // Batch 1
5056         10.0f, 11.0f, 12.0f,
5057
5058         // Batch 2
5059         4.0f, 5.0f, 6.0f,
5060
5061         // Batch 3
5062         13.0f, 14.0f, 15.0f,
5063
5064         // Batch 4
5065         7.0f, 8.0f, 9.0f,
5066
5067         // Batch 5
5068         16.0f, 17.0f, 18.0f,
5069     }));
5070
5071     return result;
5072 }
5073
5074 LayerTestResult<float, 2> Concatenation2dDim0Test(
5075     armnn::IWorkloadFactory& workloadFactory,
5076     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5077 {
5078     return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5079 }
5080
5081 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5082 LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
5083     armnn::IWorkloadFactory& workloadFactory,
5084     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5085     float qScale,
5086     int32_t qOffset)
5087 {
5088     armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
5089
5090     LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
5091         workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
5092
5093     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5094         // Batch 0
5095         1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
5096
5097         // Batch 1
5098         10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
5099     }));
5100
5101     return result;
5102 }
5103
5104 LayerTestResult<float, 2> Concatenation2dDim1Test(
5105     armnn::IWorkloadFactory& workloadFactory,
5106     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5107 {
5108     return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5109 }
5110
5111 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5112 LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
5113     armnn::IWorkloadFactory& workloadFactory,
5114     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5115     float qScale,
5116     int32_t qOffset)
5117 {
5118     armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
5119     auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5120         // Batch 0
5121         1.0f, 2.0f, 3.0f,
5122
5123         // Batch 1
5124         10.0f, 11.0f, 12.0f,
5125     }));
5126
5127     armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
5128     auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5129         // Batch 0
5130         4.0f, 5.0f, 6.0f,
5131
5132         // Batch 1
5133         13.0f, 14.0f, 15.0f,
5134
5135         // Batch 0
5136         7.0f, 8.0f, 9.0f,
5137     }));
5138
5139     armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
5140     auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5141         // Batch 1
5142         16.0f, 17.0f, 18.0f,
5143     }));
5144
5145     armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
5146     LayerTestResult<T, 2> result(outputTensorInfo);
5147
5148     std::vector<T> output;
5149     output.resize(outputTensorInfo.GetNumElements());
5150     Concatenate<T>(workloadFactory, memoryManager,
5151                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5152                    { input0.data(), input1.data(), input2.data() },
5153                    outputTensorInfo,
5154                    output.data(),
5155                    0,
5156                    true);
5157
5158     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
5159     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5160         // Batch 0
5161         1.0f, 2.0f, 3.0f,
5162
5163         // Batch 1
5164         10.0f, 11.0f, 12.0f,
5165
5166         // Batch 2
5167         4.0f, 5.0f, 6.0f,
5168
5169         // Batch 3
5170         13.0f, 14.0f, 15.0f,
5171
5172         // Batch 4
5173         7.0f, 8.0f, 9.0f,
5174
5175         // Batch 5
5176         16.0f, 17.0f, 18.0f,
5177     }));
5178
5179     return result;
5180 }
5181
5182 LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
5183     armnn::IWorkloadFactory& workloadFactory,
5184     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5185 {
5186     return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5187         workloadFactory, memoryManager, 0.0f, 0);
5188 }
5189
5190 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5191 LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
5192     armnn::IWorkloadFactory& workloadFactory,
5193     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5194     float qScale,
5195     int32_t qOffset)
5196 {
5197     armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
5198     auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5199         // Batch 0
5200         1.0f, 2.0f, 3.0f,
5201
5202         // Batch 1
5203         10.0f, 11.0f, 12.0f,
5204     }));
5205
5206     armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
5207     auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5208         // Batch 0
5209         4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
5210
5211         // Batch 1
5212         13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
5213     }));
5214
5215     armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
5216     auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5217         // Batch 0
5218         9.0f,
5219
5220         // Batch 1
5221         18.0f
5222     }));
5223
5224     armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
5225     LayerTestResult<T, 2> result(outputTensorInfo);
5226
5227     std::vector<T> output;
5228     output.resize(outputTensorInfo.GetNumElements());
5229     Concatenate<T>(workloadFactory, memoryManager,
5230                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5231                    { input0.data(), input1.data(), input2.data() },
5232                    outputTensorInfo,
5233                    output.data(),
5234                    1,
5235                    true);
5236
5237     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
5238     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5239         // Batch 0
5240         1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
5241
5242         // Batch 1
5243         10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
5244     }));
5245
5246     return result;
5247 }
5248
5249 LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
5250     armnn::IWorkloadFactory& workloadFactory,
5251     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5252 {
5253     return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5254         workloadFactory, memoryManager, 0.0f, 0);
5255 }
5256
5257 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5258 LayerTestResult<T, 3> Concatenation3dTestImpl(
5259     armnn::IWorkloadFactory& workloadFactory,
5260     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5261     const armnn::TensorInfo& outputTensorInfo,
5262     unsigned int dimension,
5263     bool useSubtensor,
5264     float qScale,
5265     int32_t qOffset)
5266 {
5267     armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5268
5269     auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5270         // Batch 0, Channel 0
5271         1.0f, 2.0f,
5272
5273         // Batch 0, Channel 1
5274         3.0f, 4.0f,
5275
5276         // Batch 0, Channel 2
5277         5.0f, 6.0f,
5278
5279         // Batch 1, Channel 0
5280         19.0f, 20.0f,
5281
5282         // Batch 1, Channel 1
5283         21.0f, 22.0f,
5284
5285         // Batch 1, Channel 2
5286         23.0f, 24.0f
5287     }));
5288
5289     auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5290         // Batch 0, Channel 0
5291         7.0f, 8.0f,
5292
5293         // Batch 0, Channel 1
5294         9.0f, 10.0f,
5295
5296         // Batch 0, Channel 2
5297         11.0f, 12.0f,
5298
5299         // Batch 1, Channel 0
5300         25.0f, 26.0f,
5301
5302         // Batch 1, Channel 1
5303         27.0f, 28.0f,
5304
5305         // Batch 1, Channel 2
5306         29.0f, 30.0f
5307     }));
5308
5309     auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5310         // Batch 0, Channel 0
5311         13.0f, 14.0f,
5312
5313         // Batch 0, Channel 1
5314         15.0f, 16.0f,
5315
5316         // Batch 0, Channel 2
5317         17.0f, 18.0f,
5318
5319         // Batch 1, Channel 0
5320         31.0f, 32.0f,
5321
5322         // Batch 1, Channel 1
5323         33.0f, 34.0f,
5324
5325         // Batch 1, Channel 2
5326         35.0f, 36.0f
5327     }));
5328
5329     LayerTestResult<T, 3> result(outputTensorInfo);
5330
5331     std::vector<T> output;
5332     output.resize(outputTensorInfo.GetNumElements());
5333     Concatenate<T>(workloadFactory, memoryManager,
5334                    { inputTensorInfo, inputTensorInfo, inputTensorInfo },
5335                    { input0.data(), input1.data(), input2.data() },
5336                    outputTensorInfo,
5337                    output.data(),
5338                    dimension,
5339                    useSubtensor);
5340
5341     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5342     return result;
5343 }
5344
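// Concatenation along dimension 0 (batch): three { 2, 3, 2 } inputs give a { 6, 3, 2 } output.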
5345 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5346 LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
5347     armnn::IWorkloadFactory& workloadFactory,
5348     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5349     float qScale,
5350     int32_t qOffset)
5351 {
5352     armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
5353
5354     LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5355         workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
5356
5357     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5358         // Batch 0, Channel 0
5359         1.0f, 2.0f,
5360
5361         // Batch 0, Channel 1
5362         3.0f, 4.0f,
5363
5364         // Batch 0, Channel 2
5365         5.0f, 6.0f,
5366
5367         // Batch 1, Channel 0
5368         19.0f, 20.0f,
5369
5370         // Batch 1, Channel 1
5371         21.0f, 22.0f,
5372
5373         // Batch 1, Channel 2
5374         23.0f, 24.0f,
5375
5376         // Batch 2, Channel 0
5377         7.0f, 8.0f,
5378
5379         // Batch 2, Channel 1
5380         9.0f, 10.0f,
5381
5382         // Batch 2, Channel 2
5383         11.0f, 12.0f,
5384
5385         // Batch 3, Channel 0
5386         25.0f, 26.0f,
5387
5388         // Batch 3, Channel 1
5389         27.0f, 28.0f,
5390
5391         // Batch 3, Channel 2
5392         29.0f, 30.0f,
5393
5394         // Batch 4, Channel 0
5395         13.0f, 14.0f,
5396
5397         // Batch 4, Channel 1
5398         15.0f, 16.0f,
5399
5400         // Batch 4, Channel 2
5401         17.0f, 18.0f,
5402
5403         // Batch 5, Channel 0
5404         31.0f, 32.0f,
5405
5406         // Batch 5, Channel 1
5407         33.0f, 34.0f,
5408
5409         // Batch 5, Channel 2
5410         35.0f, 36.0f
5411     }));
5412
5413     return result;
5414 }
5415
5416 LayerTestResult<float, 3> Concatenation3dDim0Test(
5417     armnn::IWorkloadFactory& workloadFactory,
5418     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5419 {
5420     return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5421 }
5422
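// Concatenation along dimension 1 (channel): three { 2, 3, 2 } inputs give a { 2, 9, 2 } output.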
5423 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5424 LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
5425     armnn::IWorkloadFactory& workloadFactory,
5426     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5427     float qScale,
5428     int32_t qOffset)
5429 {
5430     armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
5431
5432     LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5433         workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
5434
5435     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5436         // Batch 0, Channel 0
5437         1.0f, 2.0f,
5438
5439         // Batch 0, Channel 1
5440         3.0f, 4.0f,
5441
5442         // Batch 0, Channel 2
5443         5.0f, 6.0f,
5444
5445         // Batch 0, Channel 3
5446         7.0f, 8.0f,
5447
5448         // Batch 0, Channel 4
5449         9.0f, 10.0f,
5450
5451         // Batch 0, Channel 5
5452         11.0f, 12.0f,
5453
5454         // Batch 0, Channel 6
5455         13.0f, 14.0f,
5456
5457         // Batch 0, Channel 7
5458         15.0f, 16.0f,
5459
5460         // Batch 0, Channel 8
5461         17.0f, 18.0f,
5462
5463         // Batch 1, Channel 0
5464         19.0f, 20.0f,
5465
5466         // Batch 1, Channel 1
5467         21.0f, 22.0f,
5468
5469         // Batch 1, Channel 2
5470         23.0f, 24.0f,
5471
5472         // Batch 1, Channel 3
5473         25.0f, 26.0f,
5474
5475         // Batch 1, Channel 4
5476         27.0f, 28.0f,
5477
5478         // Batch 1, Channel 5
5479         29.0f, 30.0f,
5480
5481         // Batch 1, Channel 6
5482         31.0f, 32.0f,
5483
5484         // Batch 1, Channel 7
5485         33.0f, 34.0f,
5486
5487         // Batch 1, Channel 8
5488         35.0f, 36.0f
5489     }));
5490
5491     return result;
5492 }
5493
5494 LayerTestResult<float, 3> Concatenation3dDim1Test(
5495     armnn::IWorkloadFactory& workloadFactory,
5496     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5497 {
5498     return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
5499 }
5500
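// Concatenation along dimension 2 (width): three { 2, 3, 2 } inputs give a { 2, 3, 6 } output.
// The useSubtensor flag lets callers exercise both the subtensor and non-subtensor code paths.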
5501 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5502 LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
5503     armnn::IWorkloadFactory& workloadFactory,
5504     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5505     bool useSubtensor,
5506     float qScale,
5507     int32_t qOffset)
5508 {
5509     armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
5510
5511     LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5512         workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
5513
5514     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5515         // Batch 0, Channel 0
5516         1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
5517
5518         // Batch 0, Channel 1
5519         3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
5520
5521         // Batch 0, Channel 2
5522         5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
5523
5524         // Batch 1, Channel 0
5525         19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
5526
5527         // Batch 1, Channel 1
5528         21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
5529
5530         // Batch 1, Channel 2
5531         23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
5532     }));
5533
5534     return result;
5535 }
5536
5537 LayerTestResult<float, 3> Concatenation3dDim2Test(
5538     armnn::IWorkloadFactory& workloadFactory,
5539     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5540     bool useSubtensor)
5541 {
5542     return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
5543         workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
5544 }
5545
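// As above, but with inputs whose batch dimensions differ:
// { 2, 3, 2 } + { 1, 3, 2 } + { 3, 3, 2 } -> { 6, 3, 2 }.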
5546 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5547 LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
5548     armnn::IWorkloadFactory& workloadFactory,
5549     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5550     float qScale,
5551     int32_t qOffset)
5552 {
5553     armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5554     auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5555         // Batch 0, Channel 0
5556         1.0f, 2.0f,
5557
5558         // Batch 0, Channel 1
5559         3.0f, 4.0f,
5560
5561         // Batch 0, Channel 2
5562         5.0f, 6.0f,
5563
5564         // Batch 1, Channel 0
5565         19.0f, 20.0f,
5566
5567         // Batch 1, Channel 1
5568         21.0f, 22.0f,
5569
5570         // Batch 1, Channel 2
5571         23.0f, 24.0f
5572     }));
5573
5574     armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType, qScale, qOffset);
5575     auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5576         // Batch 0, Channel 0
5577         7.0f, 8.0f,
5578
5579         // Batch 0, Channel 1
5580         9.0f, 10.0f,
5581
5582         // Batch 0, Channel 2
5583         11.0f, 12.0f,
5584     }));
5585
5586     armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType, qScale, qOffset);
5587     auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5588         // Batch 0, Channel 0
5589         25.0f, 26.0f,
5590
5591         // Batch 0, Channel 1
5592         27.0f, 28.0f,
5593
5594         // Batch 0, Channel 2
5595         29.0f, 30.0f,
5596
5597         // Batch 1, Channel 0
5598         13.0f, 14.0f,
5599
5600         // Batch 1, Channel 1
5601         15.0f, 16.0f,
5602
5603         // Batch 1, Channel 2
5604         17.0f, 18.0f,
5605
5606         // Batch 2, Channel 0
5607         31.0f, 32.0f,
5608
5609         // Batch 2, Channel 1
5610         33.0f, 34.0f,
5611
5612         // Batch 2, Channel 2
5613         35.0f, 36.0f
5614     }));
5615
5616     armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
5617     LayerTestResult<T, 3> result(outputTensorInfo);
5618
5619     std::vector<T> output;
5620     output.resize(outputTensorInfo.GetNumElements());
5621     Concatenate<T>(workloadFactory, memoryManager,
5622                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5623                    { input0.data(), input1.data(), input2.data() },
5624                    outputTensorInfo,
5625                    output.data(),
5626                    0,
5627                    true);
5628
5629     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5630     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5631         // Batch 0, Channel 0
5632         1.0f, 2.0f,
5633
5634         // Batch 0, Channel 1
5635         3.0f, 4.0f,
5636
5637         // Batch 0, Channel 2
5638         5.0f, 6.0f,
5639
5640         // Batch 1, Channel 0
5641         19.0f, 20.0f,
5642
5643         // Batch 1, Channel 1
5644         21.0f, 22.0f,
5645
5646         // Batch 1, Channel 2
5647         23.0f, 24.0f,
5648
5649         // Batch 2, Channel 0
5650         7.0f, 8.0f,
5651
5652         // Batch 2, Channel 1
5653         9.0f, 10.0f,
5654
5655         // Batch 2, Channel 2
5656         11.0f, 12.0f,
5657
5658         // Batch 3, Channel 0
5659         25.0f, 26.0f,
5660
5661         // Batch 3, Channel 1
5662         27.0f, 28.0f,
5663
5664         // Batch 3, Channel 2
5665         29.0f, 30.0f,
5666
5667         // Batch 4, Channel 0
5668         13.0f, 14.0f,
5669
5670         // Batch 4, Channel 1
5671         15.0f, 16.0f,
5672
5673         // Batch 4, Channel 2
5674         17.0f, 18.0f,
5675
5676         // Batch 5, Channel 0
5677         31.0f, 32.0f,
5678
5679         // Batch 5, Channel 1
5680         33.0f, 34.0f,
5681
5682         // Batch 5, Channel 2
5683         35.0f, 36.0f
5684     }));
5685
5686     return result;
5687 }
5688
5689 LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5690     armnn::IWorkloadFactory& workloadFactory,
5691     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5692 {
5693     return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5694         workloadFactory, memoryManager, 0.0f, 0);
5695 }
5696
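// Concatenation along dimension 1 with differing channel counts:
// { 2, 3, 2 } + { 2, 4, 2 } + { 2, 1, 2 } -> { 2, 8, 2 }.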
5697 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5698 LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
5699     armnn::IWorkloadFactory& workloadFactory,
5700     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5701     float qScale,
5702     int32_t qOffset)
5703 {
5704     armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5705     auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5706         // Batch 0, Channel 0
5707         1.0f, 2.0f,
5708
5709         // Batch 0, Channel 1
5710         3.0f, 4.0f,
5711
5712         // Batch 0, Channel 2
5713         5.0f, 6.0f,
5714
5715         // Batch 1, Channel 0
5716         19.0f, 20.0f,
5717
5718         // Batch 1, Channel 1
5719         21.0f, 22.0f,
5720
5721         // Batch 1, Channel 2
5722         23.0f, 24.0f
5723     }));
5724
5725     armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
5726     auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5727         // Batch 0, Channel 0
5728         7.0f, 8.0f,
5729
5730         // Batch 0, Channel 1
5731         9.0f, 10.0f,
5732
5733         // Batch 0, Channel 2
5734         11.0f, 12.0f,
5735
5736         // Batch 0, Channel 3
5737         25.0f, 26.0f,
5738
5739         // Batch 1, Channel 0
5740         27.0f, 28.0f,
5741
5742         // Batch 1, Channel 1
5743         29.0f, 30.0f,
5744
5745         // Batch 1, Channel 2
5746         13.0f, 14.0f,
5747
5748         // Batch 1, Channel 3
5749         15.0f, 16.0f,
5750     }));
5751
5752     armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
5753     auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5754         // Batch 0, Channel 0
5755         17.0f, 18.0f,
5756
5757         // Batch 1, Channel 0
5758         31.0f, 32.0f,
5759     }));
5760
5761     armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
5762     LayerTestResult<T, 3> result(outputTensorInfo);
5763
5764     std::vector<T> output;
5765     output.resize(outputTensorInfo.GetNumElements());
5766     Concatenate<T>(workloadFactory, memoryManager,
5767                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5768                    { input0.data(), input1.data(), input2.data() },
5769                    outputTensorInfo,
5770                    output.data(),
5771                    1,
5772                    true);
5773
5774     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5775     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5776         // Batch 0, Channel 0
5777         1.0f, 2.0f,
5778
5779         // Batch 0, Channel 1
5780         3.0f, 4.0f,
5781
5782         // Batch 0, Channel 2
5783         5.0f, 6.0f,
5784
5785         // Batch 0, Channel 3
5786         7.0f, 8.0f,
5787
5788         // Batch 0, Channel 4
5789         9.0f, 10.0f,
5790
5791         // Batch 0, Channel 5
5792         11.0f, 12.0f,
5793
5794         // Batch 0, Channel 6
5795         25.0f, 26.0f,
5796
5797         // Batch 0, Channel 7
5798         17.0f, 18.0f,
5799
5800         // Batch 1, Channel 0
5801         19.0f, 20.0f,
5802
5803         // Batch 1, Channel 1
5804         21.0f, 22.0f,
5805
5806         // Batch 1, Channel 2
5807         23.0f, 24.0f,
5808
5809         // Batch 1, Channel 3
5810         27.0f, 28.0f,
5811
5812         // Batch 1, Channel 4
5813         29.0f, 30.0f,
5814
5815         // Batch 1, Channel 5
5816         13.0f, 14.0f,
5817
5818         // Batch 1, Channel 6
5819         15.0f, 16.0f,
5820
5821         // Batch 1, Channel 7
5822         31.0f, 32.0f,
5823     }));
5824
5825     return result;
5826 }
5827
5828 LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5829     armnn::IWorkloadFactory& workloadFactory,
5830     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5831 {
5832     return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5833         workloadFactory, memoryManager, 0.0f, 0);
5834 }
5835
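// Concatenation along dimension 2 with differing widths:
// { 2, 3, 2 } + { 2, 3, 1 } + { 2, 3, 3 } -> { 2, 3, 6 }.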
5836 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5837 LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
5838     armnn::IWorkloadFactory& workloadFactory,
5839     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5840     bool useSubtensor,
5841     float qScale,
5842     int32_t qOffset)
5843 {
5844     armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
5845     auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5846         // Batch 0, Channel 0
5847         1.0f, 2.0f,
5848
5849         // Batch 0, Channel 1
5850         3.0f, 4.0f,
5851
5852         // Batch 0, Channel 2
5853         5.0f, 6.0f,
5854
5855         // Batch 1, Channel 0
5856         19.0f, 20.0f,
5857
5858         // Batch 1, Channel 1
5859         21.0f, 22.0f,
5860
5861         // Batch 1, Channel 2
5862         23.0f, 24.0f
5863     }));
5864
5865     armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
5866     auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5867         // Batch 0, Channel 0
5868         7.0f,
5869
5870         // Batch 0, Channel 1
5871         9.0f,
5872
5873         // Batch 0, Channel 2
5874         11.0f,
5875
5876         // Batch 1, Channel 0
5877         25.0f,
5878
5879         // Batch 1, Channel 1
5880         27.0f,
5881
5882         // Batch 1, Channel 2
5883         29.0f
5884     }));
5885
5886     armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
5887     auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5888         // Batch 0, Channel 0
5889         13.0f, 14.0f, 50.0f,
5890
5891         // Batch 0, Channel 1
5892         15.0f, 16.0f, 51.0f,
5893
5894         // Batch 0, Channel 2
5895         17.0f, 18.0f, 52.0f,
5896
5897         // Batch 1, Channel 0
5898         31.0f, 32.0f, 53.0f,
5899
5900         // Batch 1, Channel 1
5901         33.0f, 34.0f, 54.0f,
5902
5903         // Batch 1, Channel 2
5904         35.0f, 36.0f, 55.0f,
5905     }));
5906
5907     armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
5908     LayerTestResult<T, 3> result(outputTensorInfo);
5909
5910     std::vector<T> output;
5911     output.resize(outputTensorInfo.GetNumElements());
5912     Concatenate<T>(workloadFactory, memoryManager,
5913                    { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5914                    { input0.data(), input1.data(), input2.data() },
5915                    outputTensorInfo,
5916                    output.data(),
5917                    2,
5918                    useSubtensor);
5919
5920     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5921     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5922         // Batch 0, Channel 0
5923         1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
5924
5925         // Batch 0, Channel 1
5926         3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
5927
5928         // Batch 0, Channel 2
5929         5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
5930
5931         // Batch 1, Channel 0
5932         19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
5933
5934         // Batch 1, Channel 1
5935         21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
5936
5937         // Batch 1, Channel 2
5938         23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
5939     }));
5940
5941     return result;
5942 }
5943
5944 LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5945     armnn::IWorkloadFactory& workloadFactory,
5946     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5947     bool useSubtensor)
5948 {
5949     return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5950         workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
5951 }
5952
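// Shared helper for the 4D concatenation tests below: concatenates three { 1, 3, 2, 2 }
// inputs along the requested dimension; each caller supplies the expected output.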
5953 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5954 LayerTestResult<T, 4> Concatenation4dTestImpl(
5955     armnn::IWorkloadFactory& workloadFactory,
5956     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5957     const armnn::TensorInfo& outputTensorInfo,
5958     unsigned int dimension,
5959     bool useSubtensor,
5960     float qScale,
5961     int32_t qOffset)
5962 {
5963     armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
5964
5965     auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5966         1.0f, 2.0f,
5967         3.0f, 4.0f,
5968         5.0f, 6.0f,
5969         7.0f, 8.0f,
5970         9.0f, 10.0f,
5971         11.0f, 12.0f
5972     }));
5973
5974     auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5975         11.0f, 12.0f,
5976         13.0f, 14.0f,
5977         15.0f, 16.0f,
5978         17.0f, 18.0f,
5979         19.0f, 20.0f,
5980         21.0f, 22.0f
5981     }));
5982
5983     auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5984         21.0f, 22.0f,
5985         23.0f, 24.0f,
5986         25.0f, 26.0f,
5987         27.0f, 28.0f,
5988         29.0f, 30.0f,
5989         31.0f, 32.0f
5990     }));
5991
5992     LayerTestResult<T, 4> result(outputTensorInfo);
5993
5994     std::vector<T> output;
5995     output.resize(outputTensorInfo.GetNumElements());
5996
5997     Concatenate<T>(workloadFactory,
5998                    memoryManager,
5999                    {inputTensorInfo, inputTensorInfo, inputTensorInfo},
6000                    {input0.data(), input1.data(), input2.data()},
6001                    outputTensorInfo,
6002                    output.data(),
6003                    dimension,
6004                    useSubtensor);
6005
6006     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6007     return result;
6008 }
6009
6010 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6011 LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
6012     armnn::IWorkloadFactory& workloadFactory,
6013     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6014     float qScale,
6015     int32_t qOffset)
6016 {
6017     armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6018
6019     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
6020         workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
6021
6022     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6023         1.0f, 2.0f,
6024         3.0f, 4.0f,
6025         5.0f, 6.0f,
6026         7.0f, 8.0f,
6027         9.0f, 10.0f,
6028         11.0f, 12.0f,
6029
6030         11.0f, 12.0f,
6031         13.0f, 14.0f,
6032         15.0f, 16.0f,
6033         17.0f, 18.0f,
6034         19.0f, 20.0f,
6035         21.0f, 22.0f,
6036
6037         21.0f, 22.0f,
6038         23.0f, 24.0f,
6039         25.0f, 26.0f,
6040         27.0f, 28.0f,
6041         29.0f, 30.0f,
6042         31.0f, 32.0f
6043     }));
6044     return result;
6045 }
6046
6047 LayerTestResult<float, 4> Concatenation4dDim0Test(
6048     armnn::IWorkloadFactory& workloadFactory,
6049     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6050 {
6051     return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6052 }
6053
6054 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6055 LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
6056     armnn::IWorkloadFactory& workloadFactory,
6057     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6058     float qScale,
6059     int32_t qOffset)
6060 {
6061     armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
6062
6063     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
6064         workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
6065
6066     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6067         1.0f, 2.0f,
6068         3.0f, 4.0f,
6069         5.0f, 6.0f,
6070         7.0f, 8.0f,
6071         9.0f, 10.0f,
6072         11.0f, 12.0f,
6073
6074         11.0f, 12.0f,
6075         13.0f, 14.0f,
6076         15.0f, 16.0f,
6077         17.0f, 18.0f,
6078         19.0f, 20.0f,
6079         21.0f, 22.0f,
6080
6081         21.0f, 22.0f,
6082         23.0f, 24.0f,
6083         25.0f, 26.0f,
6084         27.0f, 28.0f,
6085         29.0f, 30.0f,
6086         31.0f, 32.0f
6087     }));
6088
6089     return result;
6090 }
6091
6092 LayerTestResult<float, 4> Concatenation4dDim1Test(
6093     armnn::IWorkloadFactory& workloadFactory,
6094     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6095 {
6096     return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6097 }
6098
6099 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6100 LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
6101     armnn::IWorkloadFactory& workloadFactory,
6102     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6103     float qScale,
6104     int32_t qOffset)
6105 {
6106     armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
6107
6108     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
6109         workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
6110
6111     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6112         1.0f, 2.0f,
6113         3.0f, 4.0f,
6114         11.0f, 12.0f,
6115         13.0f, 14.0f,
6116         21.0f, 22.0f,
6117         23.0f, 24.0f,
6118
6119         5.0f, 6.0f,
6120         7.0f, 8.0f,
6121         15.0f, 16.0f,
6122         17.0f, 18.0f,
6123         25.0f, 26.0f,
6124         27.0f, 28.0f,
6125
6126         9.0f, 10.0f,
6127         11.0f, 12.0f,
6128         19.0f, 20.0f,
6129         21.0f, 22.0f,
6130         29.0f, 30.0f,
6131         31.0f, 32.0f
6132     }));
6133
6134     return result;
6135 }
6136
6137 LayerTestResult<float, 4> Concatenation4dDim2Test(
6138     armnn::IWorkloadFactory& workloadFactory,
6139     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6140 {
6141     return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6142 }
6143
6144 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6145 LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
6146     armnn::IWorkloadFactory& workloadFactory,
6147     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6148     float qScale,
6149     int32_t qOffset,
6150     bool useSubtensor)
6151 {
6152     armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
6153
6154     LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
6155         workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
6156
6157     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6158         1.0f, 2.0f,
6159         11.0f, 12.0f,
6160         21.0f, 22.0f,
6161         3.0f, 4.0f,
6162         13.0f, 14.0f,
6163         23.0f, 24.0f,
6164
6165         5.0f, 6.0f,
6166         15.0f, 16.0f,
6167         25.0f, 26.0f,
6168         7.0f, 8.0f,
6169         17.0f, 18.0f,
6170         27.0f, 28.0f,
6171
6172         9.0f, 10.0f,
6173         19.0f, 20.0f,
6174         29.0f, 30.0f,
6175         11.0f, 12.0f,
6176         21.0f, 22.0f,
6177         31.0f, 32.0f
6178     }));
6179
6180     return result;
6181 }
6182
6183 LayerTestResult<float, 4> Concatenation4dDim3Test(
6184     armnn::IWorkloadFactory& workloadFactory,
6185     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6186     bool useSubtensor)
6187 {
6188     return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
6189         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
6190 }
6191
6192 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6193 LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
6194     armnn::IWorkloadFactory& workloadFactory,
6195     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6196     float qScale,
6197     int32_t qOffset)
6198 {
6199     unsigned int dimension = 0;
6200     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6201
6202     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6203         1.0f, 2.0f,
6204         3.0f, 4.0f,
6205         5.0f, 6.0f,
6206         7.0f, 8.0f,
6207         9.0f, 10.0f,
6208         11.0f, 12.0f
6209     }));
6210
6211     armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6212
6213     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6214         11.0f, 12.0f,
6215         13.0f, 14.0f,
6216         15.0f, 16.0f,
6217         17.0f, 18.0f,
6218         19.0f, 20.0f,
6219         21.0f, 22.0f,
6220
6221         21.0f, 22.0f,
6222         23.0f, 24.0f,
6223         25.0f, 26.0f,
6224         27.0f, 28.0f,
6225         29.0f, 30.0f,
6226         31.0f, 32.0f
6227
6228     }));
6229
6230     armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6231
6232     LayerTestResult<T, 4> result(outputTensorInfo);
6233
6234     std::vector<T> output;
6235     output.resize(outputTensorInfo.GetNumElements());
6236     Concatenate<T>(workloadFactory,
6237                    memoryManager,
6238                    {inputTensorInfo0, inputTensorInfo1},
6239                    {input0.data(), input1.data()},
6240                    outputTensorInfo,
6241                    output.data(),
6242                    dimension,
6243                    true);
6244
6245     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6246     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6247         1.0f, 2.0f,
6248         3.0f, 4.0f,
6249         5.0f, 6.0f,
6250         7.0f, 8.0f,
6251         9.0f, 10.0f,
6252         11.0f, 12.0f,
6253
6254         11.0f, 12.0f,
6255         13.0f, 14.0f,
6256         15.0f, 16.0f,
6257         17.0f, 18.0f,
6258         19.0f, 20.0f,
6259         21.0f, 22.0f,
6260
6261         21.0f, 22.0f,
6262         23.0f, 24.0f,
6263         25.0f, 26.0f,
6264         27.0f, 28.0f,
6265         29.0f, 30.0f,
6266         31.0f, 32.0f
6267     }));
6268
6269     return result;
6270 }
6271
6272 LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
6273     armnn::IWorkloadFactory& workloadFactory,
6274     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6275 {
6276     return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
6277         workloadFactory, memoryManager, 0.0f, 0);
6278 }
6279
6280 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6281 LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
6282     armnn::IWorkloadFactory& workloadFactory,
6283     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6284     float qScale,
6285     int32_t qOffset)
6286 {
6287     unsigned int dimension = 1;
6288     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6289
6290     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6291         1.0f, 2.0f,
6292         3.0f, 4.0f,
6293         5.0f, 6.0f,
6294         7.0f, 8.0f,
6295         9.0f, 10.0f,
6296         11.0f, 12.0f
6297     }));
6298
6299     armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
6300
6301     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6302         11.0f, 12.0f,
6303         13.0f, 14.0f,
6304         15.0f, 16.0f,
6305         17.0f, 18.0f,
6306
6307     }));
6308
6309     armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
6310
6311     LayerTestResult<T, 4> result(outputTensorInfo);
6312
6313     std::vector<T> output;
6314     output.resize(outputTensorInfo.GetNumElements());
6315     Concatenate<T>(workloadFactory,
6316                    memoryManager,
6317                    {inputTensorInfo0, inputTensorInfo1},
6318                    {input0.data(), input1.data()},
6319                    outputTensorInfo,
6320                    output.data(),
6321                    dimension,
6322                    true);
6323
6324     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6325     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6326         1.0f, 2.0f,
6327         3.0f, 4.0f,
6328         5.0f, 6.0f,
6329         7.0f, 8.0f,
6330         9.0f, 10.0f,
6331         11.0f, 12.0f,
6332         11.0f, 12.0f,
6333         13.0f, 14.0f,
6334         15.0f, 16.0f,
6335         17.0f, 18.0f
6336     }));
6337
6338     return result;
6339 }
6340
6341 LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
6342     armnn::IWorkloadFactory& workloadFactory,
6343     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6344 {
6345     return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
6346         workloadFactory, memoryManager, 0.0f, 0);
6347 }
6348
6349 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6350 LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
6351     armnn::IWorkloadFactory& workloadFactory,
6352     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6353     float qScale,
6354     int32_t qOffset)
6355 {
6356     unsigned int dimension = 2;
6357     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6358
6359     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6360         1.0f, 2.0f,
6361         3.0f, 4.0f,
6362         5.0f, 6.0f,
6363         7.0f, 8.0f,
6364         9.0f, 10.0f,
6365         11.0f, 12.0f
6366     }));
6367
6368     armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
6369
6370     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6371         11.0f, 12.0f,
6372         13.0f, 14.0f,
6373         15.0f, 16.0f,
6374         17.0f, 18.0f,
6375         19.0f, 20.0f,
6376         21.0f, 22.0f,
6377         23.0f, 24.0f,
6378         25.0f, 26.0f,
6379         27.0f, 28.0f
6380     }));
6381
6382     armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
6383
6384     LayerTestResult<T, 4> result(outputTensorInfo);
6385
6386     std::vector<T> output;
6387     output.resize(outputTensorInfo.GetNumElements());
6388     Concatenate<T>(workloadFactory,
6389                    memoryManager,
6390                    {inputTensorInfo0, inputTensorInfo1},
6391                    {input0.data(), input1.data()},
6392                    outputTensorInfo,
6393                    output.data(),
6394                    dimension,
6395                    true);
6396
6397     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6398     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6399         1.0f, 2.0f,
6400         3.0f, 4.0f,
6401         11.0f, 12.0f,
6402         13.0f, 14.0f,
6403         15.0f, 16.0f,
6404
6405         5.0f, 6.0f,
6406         7.0f, 8.0f,
6407         17.0f, 18.0f,
6408         19.0f, 20.0f,
6409         21.0f, 22.0f,
6410
6411         9.0f, 10.0f,
6412         11.0f, 12.0f,
6413         23.0f, 24.0f,
6414         25.0f, 26.0f,
6415         27.0f, 28.0f
6416     }));
6417
6418     return result;
6419 }
6420
6421 LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
6422     armnn::IWorkloadFactory& workloadFactory,
6423     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6424 {
6425     return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
6426         workloadFactory, memoryManager, 0.0f, 0);
6427 }
6428
6429 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6430 LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
6431     armnn::IWorkloadFactory& workloadFactory,
6432     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6433     float qScale,
6434     int32_t qOffset,
6435     bool useSubtensor)
6436 {
6437     unsigned int dimension = 3;
6438     armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
6439
6440     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6441         1.0f, 2.0f,
6442         3.0f, 4.0f,
6443         5.0f, 6.0f,
6444         7.0f, 8.0f,
6445         9.0f, 10.0f,
6446         11.0f, 12.0f
6447     }));
6448
6449     armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
6450
6451     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6452         11.0f, 12.0f, 13.0f,
6453         14.0f, 15.0f, 16.0f,
6454
6455         17.0f, 18.0f, 19.0f,
6456         20.0f, 21.0f, 22.0f,
6457
6458         23.0f, 24.0f, 25.0f,
6459         26.0f, 27.0f, 28.0f
6460     }));
6461
6462     armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
6463
6464     LayerTestResult<T, 4> result(outputTensorInfo);
6465
6466     std::vector<T> output;
6467     output.resize(outputTensorInfo.GetNumElements());
6468     Concatenate<T>(workloadFactory,
6469                    memoryManager,
6470                    {inputTensorInfo0, inputTensorInfo1},
6471                    {input0.data(), input1.data()},
6472                    outputTensorInfo,
6473                    output.data(),
6474                    dimension,
6475                    useSubtensor);
6476
6477     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6478     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6479         1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
6480         3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
6481         5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
6482         7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
6483         9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
6484         11.0f, 12.0f, 26.0f, 27.0f, 28.0f
6485     }));
6486
6487     return result;
6488 }
6489
6490 LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
6491     armnn::IWorkloadFactory& workloadFactory,
6492     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6493     bool useSubtensor)
6494 {
6495     return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
6496         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
6497 }
6498
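// FakeQuantization maps the range [m_Min, m_Max] linearly onto [0, 255]: with
// min = -10 and max = 10, an input of -10.0f becomes 0.0f and 10.0f becomes 255.0f.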
6499 LayerTestResult<float, 2> FakeQuantizationTest(
6500     armnn::IWorkloadFactory& workloadFactory,
6501     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6502 {
6503     constexpr unsigned int width = 2;
6504     constexpr unsigned int height = 3;
6505
6506     const armnn::TensorInfo tensorInfo({ height, width },
6507         armnn::DataType::Float32);
6508     auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6509        -10.0f,  -5.0f,
6510          0.0f,   5.0f,
6511         10.0f,  10.0f
6512     }));
6513
6514     LayerTestResult<float, 2> ret(tensorInfo);
6515
6516     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6517
6518     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6519
6520     armnn::FakeQuantizationQueueDescriptor data;
6521     armnn::WorkloadInfo info;
6522
6523     AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
6524     AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
6525     float min = -10.f;
6526     float max = 10.f;
6527
6528     data.m_Parameters.m_Min = min;
6529     data.m_Parameters.m_Max = max;
6530
6531     armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
6532     armnn::FakeQuantizationQueueDescriptor refData = data;
6533     armnn::WorkloadInfo refInfo = info;
6534     SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
6535
6536     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
6537
6538     inputHandle->Allocate();
6539     outputHandle->Allocate();
6540
6541     CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
6542
6543     workload->PostAllocationConfigure();
6544     workload->Execute();
6545
6546     CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
6547
6548     ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6549         0.0f,     63.0f,
6550         128.0f,   191.0f,
6551         255.0f,   255.0f
6552     }));
6553     return ret;
6554 }
6555
6556 namespace
6557 {
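// L2 normalization divides each element by the L2 norm computed along the channel
// axis (epsilon guards against division by zero). Input and expected values are
// supplied in NCHW order and permuted below when the test runs with an NHWC layout.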
6558 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6559 LayerTestResult<T, 4> L2NormalizationTestImpl(
6560     armnn::IWorkloadFactory& workloadFactory,
6561     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6562     const armnn::TensorShape& inputOutputTensorShape,
6563     float scale,
6564     int32_t offset,
6565     const std::vector<float>& inputValues,
6566     float outScale,
6567     int32_t outOffset,
6568     const std::vector<float>& expectedOutputValues,
6569     const armnn::DataLayout layout,
6570     float epsilon = 1e-12f)
6571 {
6572     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
6573     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
6574
6575     // At this point, permute the input data if the requested layout is NHWC.
6576     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
6577     std::vector<float> inputData = inputValues;
6578     if (layout == armnn::DataLayout::NHWC)
6579     {
6580         std::vector<float> tmp(inputData.size());
6581         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
6582         inputData = tmp;
6583     }
6584
6585     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
6586                                                          inputTensorInfo.GetQuantizationScale(),
6587                                                          inputTensorInfo.GetQuantizationOffset(),
6588                                                          inputData));
6589
6590     std::vector<float> expectedOutputData = expectedOutputValues;
6591     if (layout == armnn::DataLayout::NHWC)
6592     {
6593         std::vector<float> tmp(expectedOutputData.size());
6594         armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
6595                             sizeof(float));
6596         expectedOutputData = tmp;
6597     }
6598
6599     LayerTestResult<T, 4> result(outputTensorInfo);
6600     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
6601                                                                outputTensorInfo.GetQuantizationScale(),
6602                                                                outputTensorInfo.GetQuantizationOffset(),
6603                                                                expectedOutputData));
6604
6605     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6606     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6607
6608     armnn::L2NormalizationQueueDescriptor descriptor;
6609     descriptor.m_Parameters.m_Eps = epsilon;
6610     descriptor.m_Parameters.m_DataLayout = layout;
6611     armnn::WorkloadInfo info;
6612
6613     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6614     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6615
6616     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
6617
6618     inputHandle->Allocate();
6619     outputHandle->Allocate();
6620
6621     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6622
6623     workload->PostAllocationConfigure();
6624     ExecuteWorkload(*workload, memoryManager);
6625
6626     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6627
6628     return result;
6629 }
6630
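// Returns 1 / sqrt(sum of squares); used to build expected L2 normalization outputs.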
6631 float CalcInvL2Norm(std::initializer_list<float> elements)
6632 {
6633     const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
6634         [](float acc, float element) { return acc + element * element; });
6635     return 1.0f / sqrtf(reduction);
6636 }
6637
6638 } // anonymous namespace
6639
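// Pads a 3x3 input with two elements of customPaddingValue on each side of both
// dimensions: out_dim = pad_before + in_dim + pad_after = 2 + 3 + 2 = 7, giving 7x7.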
6640 template<armnn::DataType ArmnnType, typename T>
6641 LayerTestResult<T, 2> Pad2dTestCommon(
6642     armnn::IWorkloadFactory& workloadFactory,
6643     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6644     float qScale,
6645     int32_t qOffset,
6646     const float customPaddingValue)
6647 {
6648     const armnn::TensorShape inputShape{ 3, 3 };
6649     const armnn::TensorShape outputShape{ 7, 7 };
6650
6651     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6652     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
6653
6654     std::vector<T> inputValues(
6655     QuantizedVector<T>(qScale, qOffset,
6656     {
6657       // Height (3) x Width (3)
6658       4, 8, 6,
6659       7, 4, 4,
6660       3, 2, 4
6661     }));
6662
6663     auto p = customPaddingValue;
6664     std::vector<T> expectedOutputValues;
6665     expectedOutputValues =
6666     QuantizedVector<T>(qScale, qOffset,
6667     {
6668       p, p, p, p, p, p, p,
6669       p, p, p, p, p, p, p,
6670       p, p, 4, 8, 6, p, p,
6671       p, p, 7, 4, 4, p, p,
6672       p, p, 3, 2, 4, p, p,
6673       p, p, p, p, p, p, p,
6674       p, p, p, p, p, p, p
6675     });
6676
6677     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
6678
6679     LayerTestResult<T, 2> result(outputTensorInfo);
6680     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
6681
6682     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6683     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6684
6685     armnn::PadQueueDescriptor descriptor;
6686
6687     std::vector<std::pair<unsigned int, unsigned int>> padList;
6688     padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6689     padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6690
6691     descriptor.m_Parameters.m_PadList = padList;
6692     descriptor.m_Parameters.m_PadValue = customPaddingValue;
6693     armnn::WorkloadInfo info;
6694
6695     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6696     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6697
6698     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6699
6700     inputHandle->Allocate();
6701     outputHandle->Allocate();
6702
6703     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
6704
6705     workload->PostAllocationConfigure();
6706     workload->Execute();
6707
6708     CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
6709
6710     return result;
6711 }
6712
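// Zero-pads a { 2, 2, 2 } input with per-dimension (before, after) padding of
// (0,1), (2,1) and (2,2), giving a { 3, 5, 6 } output.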
6713 template<armnn::DataType ArmnnType, typename T>
6714 LayerTestResult<T, 3> Pad3dTestCommon(
6715     armnn::IWorkloadFactory& workloadFactory,
6716     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6717     float qScale,
6718     int32_t qOffset)
6719 {
6720     const armnn::TensorShape inputShape{ 2, 2, 2 };
6721     const armnn::TensorShape outputShape{ 3, 5, 6 };
6722
6723     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6724     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
6725
6726     std::vector<T> inputValues(
6727       QuantizedVector<T>(qScale, qOffset,
6728     {
6729         // Channel 0, Height (2) x Width (2)
6730         0, 4,
6731         2, 5,
6732
6733         // Channel 1, Height (2) x Width (2)
6734         6, 1,
6735         5, 2
6736     }));
6737
6738     std::vector<T> expectedOutputValues(
6739       QuantizedVector<T>(qScale, qOffset,
6740     {
6741
6742         0, 0, 0, 0, 0, 0,
6743         0, 0, 0, 0, 0, 0,
6744         0, 0, 0, 4, 0, 0,
6745         0, 0, 2, 5, 0, 0,
6746         0, 0, 0, 0, 0, 0,
6747
6748         0, 0, 0, 0, 0, 0,
6749         0, 0, 0, 0, 0, 0,
6750         0, 0, 6, 1, 0, 0,
6751         0, 0, 5, 2, 0, 0,
6752         0, 0, 0, 0, 0, 0,
6753
6754         0, 0, 0, 0, 0, 0,
6755         0, 0, 0, 0, 0, 0,
6756         0, 0, 0, 0, 0, 0,
6757         0, 0, 0, 0, 0, 0,
6758         0, 0, 0, 0, 0, 0
6759
6760     }));
6761
6762     auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
6763
6764     LayerTestResult<T, 3> result(outputTensorInfo);
6765     result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
6766
6767     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6768     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6769
6770     armnn::PadQueueDescriptor descriptor;
6771
6772     std::vector<std::pair<unsigned int, unsigned int>> padList;
6773     padList.push_back(std::pair<unsigned int, unsigned int>(0,1));
6774     padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6775     padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6776
6777     descriptor.m_Parameters.m_PadList = padList;
6778     armnn::WorkloadInfo info;
6779
6780     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6781     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6782
6783     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6784
6785     inputHandle->Allocate();
6786     outputHandle->Allocate();
6787
6788     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
6789
6790     workload->PostAllocationConfigure();
6791     workload->Execute();
6792
6793     CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
6794
6795     return result;
6796 }
6797
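// Zero-pads a { 2, 2, 3, 2 } input with per-dimension (before, after) padding of
// (1,1), (2,1), (3,1) and (1,1), giving a { 4, 5, 7, 4 } output.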
6798 template<armnn::DataType ArmnnType, typename T>
6799 LayerTestResult<T, 4> Pad4dTestCommon(
6800     armnn::IWorkloadFactory& workloadFactory,
6801     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6802     float qScale,
6803     int32_t qOffset)
6804 {
6805     const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6806     const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6807
6808     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6809     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
6810
6811     std::vector<T> inputValues(
6812       QuantizedVector<T>(qScale, qOffset,
6813     {
6814         // Batch 0, Channel 0, Height (3) x Width (2)
6815         0, 1,
6816         2, 3,
6817         4, 5,
6818
6819         // Batch 0, Channel 1, Height (3) x Width (2)
6820         6, 7,
6821         8, 9,
6822         10, 11,
6823
6824         // Batch 1, Channel 0, Height (3) x Width (2)
6825         12, 13,
6826         14, 15,
6827         16, 17,
6828
6829         // Batch 1, Channel 1, Height (3) x Width (2)
6830         18, 19,
6831         20, 21,
6832         22, 23
6833     }));
6834
6835     std::vector<T> expectedOutputValues(
6836       QuantizedVector<T>(qScale, qOffset,
6837     {
6838         0, 0, 0, 0,
6839         0, 0, 0, 0,
6840         0, 0, 0, 0,
6841         0, 0, 0, 0,
6842         0, 0, 0, 0,
6843         0, 0, 0, 0,
6844         0, 0, 0, 0,
6845
6846         0, 0, 0, 0,
6847         0, 0, 0, 0,
6848         0, 0, 0, 0,
6849         0, 0, 0, 0,
6850         0, 0, 0, 0,
6851         0, 0, 0, 0,
6852         0, 0, 0, 0,
6853
6854         0, 0, 0, 0,
6855         0, 0, 0, 0,
6856         0, 0, 0, 0,
6857         0, 0, 0, 0,
6858         0, 0, 0, 0,
6859         0, 0, 0, 0,
6860         0, 0, 0, 0,
6861
6862         0, 0, 0, 0,
6863         0, 0, 0, 0,
6864         0, 0, 0, 0,
6865         0, 0, 0, 0,
6866         0, 0, 0, 0,
6867         0, 0, 0, 0,
6868         0, 0, 0, 0,
6869
6870         0, 0, 0, 0,
6871         0, 0, 0, 0,
6872         0, 0, 0, 0,
6873         0, 0, 0, 0,
6874         0, 0, 0, 0,
6875         0, 0, 0, 0,
6876         0, 0, 0, 0,
6877
6878         0, 0, 0, 0,
6879         0, 0, 0, 0,
6880         0, 0, 0, 0,
6881         0, 0, 0, 0,
6882         0, 0, 0, 0,
6883         0, 0, 0, 0,
6884         0, 0, 0, 0,
6885
6886         0, 0, 0, 0,
6887         0, 0, 0, 0,
6888         0, 0, 0, 0,
6889         0, 0, 0, 0,
6890         0, 0, 0, 0,
6891         0, 0, 0, 0,
6892         0, 0, 0, 0,
6893
6894         0, 0, 0, 0,
6895         0, 0, 0, 0,
6896         0, 0, 0, 0,
6897         0, 0, 1, 0,
6898         0, 2, 3, 0,
6899         0, 4, 5, 0,
6900         0, 0, 0, 0,
6901
6902         0, 0, 0, 0,
6903         0, 0, 0, 0,
6904         0, 0, 0, 0,
6905         0, 6, 7, 0,
6906         0, 8, 9, 0,
6907         0, 10, 11, 0,
6908         0, 0, 0, 0,
6909
6910         0, 0, 0, 0,
6911         0, 0, 0, 0,
6912         0, 0, 0, 0,
6913         0, 0, 0, 0,
6914         0, 0, 0, 0,
6915         0, 0, 0, 0,
6916         0, 0, 0, 0,
6917
6918         0, 0, 0, 0,
6919         0, 0, 0, 0,
6920         0, 0, 0, 0,
6921         0, 0, 0, 0,
6922         0, 0, 0, 0,
6923         0, 0, 0, 0,
6924         0, 0, 0, 0,
6925
6926         0, 0, 0, 0,
6927         0, 0, 0, 0,
6928         0, 0, 0, 0,
6929         0, 0, 0, 0,
6930         0, 0, 0, 0,
6931         0, 0, 0, 0,
6932         0, 0, 0, 0,
6933
6934         0, 0, 0, 0,
6935         0, 0, 0, 0,
6936         0, 0, 0, 0,
6937         0, 12, 13, 0,
6938         0, 14, 15, 0,
6939         0, 16, 17, 0,
6940         0, 0, 0, 0,
6941
6942         0, 0, 0, 0,
6943         0, 0, 0, 0,
6944         0, 0, 0, 0,
6945         0, 18, 19, 0,
6946         0, 20, 21, 0,
6947         0, 22, 23, 0,
6948         0, 0, 0, 0,
6949
6950         0, 0, 0, 0,
6951         0, 0, 0, 0,
6952         0, 0, 0, 0,
6953         0, 0, 0, 0,
6954         0, 0, 0, 0,
6955         0, 0, 0, 0,
6956         0, 0, 0, 0,
6957
6958         0, 0, 0, 0,
6959         0, 0, 0, 0,
6960         0, 0, 0, 0,
6961         0, 0, 0, 0,
6962         0, 0, 0, 0,
6963         0, 0, 0, 0,
6964         0, 0, 0, 0,
6965
6966         0, 0, 0, 0,
6967         0, 0, 0, 0,
6968         0, 0, 0, 0,
6969         0, 0, 0, 0,
6970         0, 0, 0, 0,
6971         0, 0, 0, 0,
6972         0, 0, 0, 0,
6973
6974         0, 0, 0, 0,
6975         0, 0, 0, 0,
6976         0, 0, 0, 0,
6977         0, 0, 0, 0,
6978         0, 0, 0, 0,
6979         0, 0, 0, 0,
6980         0, 0, 0, 0,
6981
6982         0, 0, 0, 0,
6983         0, 0, 0, 0,
6984         0, 0, 0, 0,
6985         0, 0, 0, 0,
6986         0, 0, 0, 0,
6987         0, 0, 0, 0,
6988         0, 0, 0, 0,
6989
6990         0, 0, 0, 0,
6991         0, 0, 0, 0,
6992         0, 0, 0, 0,
6993         0, 0, 0, 0,
6994         0, 0, 0, 0,
6995         0, 0, 0, 0,
6996         0, 0, 0, 0
6997     }));
6998
6999     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
7000
7001     LayerTestResult<T, 4> result(outputTensorInfo);
7002     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
7003
7004     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7005     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7006
7007     armnn::PadQueueDescriptor descriptor;
7008
7009     std::vector<std::pair<unsigned int, unsigned int>> padList;
7010     padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
7011     padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
7012     padList.push_back(std::pair<unsigned int, unsigned int>(3,1));
7013     padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
7014
7015     descriptor.m_Parameters.m_PadList = padList;
7016     armnn::WorkloadInfo info;
7017
7018     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7019     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7020
7021     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
7022
7023     inputHandle->Allocate();
7024     outputHandle->Allocate();
7025
7026     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
7027
7028     workload->PostAllocationConfigure();
7029     workload->Execute();
7030
7031     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7032
7033     return result;
7034 }
7035
7036 LayerTestResult<uint8_t, 2> PadUint82dTest(
7037     armnn::IWorkloadFactory& workloadFactory,
7038     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7039 {
7040     return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
7041 }
7042
7043 LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
7044     armnn::IWorkloadFactory& workloadFactory,
7045     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7046 {
7047     return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
7048 }
7049
7050 LayerTestResult<uint8_t, 3> PadUint83dTest(
7051     armnn::IWorkloadFactory& workloadFactory,
7052     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7053 {
7054     return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
7055 }
7056
7057 LayerTestResult<uint8_t, 4> PadUint84dTest(
7058     armnn::IWorkloadFactory& workloadFactory,
7059     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7060 {
7061     return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
7062 }
7063
7064
7065 template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
7066 Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
7067     armnn::IWorkloadFactory& workloadFactory,
7068     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7069     float qScale,
7070     int32_t qOffset,
7071     const float customPaddingValue);
7072
7073 template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
7074 Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
7075     armnn::IWorkloadFactory& workloadFactory,
7076     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7077     float qScale,
7078     int32_t qOffset);
7079
7080 template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
7081 Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
7082     armnn::IWorkloadFactory& workloadFactory,
7083     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7084     float qScale,
7085     int32_t qOffset);
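     // Explicit instantiations that make the QuantisedSymm16 Pad*TestCommon definitions
     // available to other translation units, e.g. backend-specific test suites.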
7086
7087 LayerTestResult<float, 2> PadFloat322dTest(
7088     armnn::IWorkloadFactory& workloadFactory,
7089     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7090 {
7091     return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
7092 }
7093
7094 LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
7095     armnn::IWorkloadFactory& workloadFactory,
7096     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7097 {
7098     return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
7099 }
7100
7101 LayerTestResult<float, 3> PadFloat323dTest(
7102     armnn::IWorkloadFactory& workloadFactory,
7103     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7104 {
7105     return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
7106 }
7107
7108 LayerTestResult<float, 4> PadFloat324dTest(
7109     armnn::IWorkloadFactory& workloadFactory,
7110     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7111 {
7112     return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
7113 }
7114
7115 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7116 LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
7117         armnn::IWorkloadFactory& workloadFactory,
7118         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7119         float scale,
7120         int32_t offset,
7121         float outScale,
7122         int32_t outOffset,
7123         const armnn::DataLayout layout,
7124         float epsilon)
7125 {
7126     // Width: 1
7127     // Height: 1
7128     // Channels: 3
7129     // BatchSize: 1
7130     unsigned int numberOfBatches = 1;
7131     unsigned int numberOfChannels = 3;
7132     unsigned int height = 1;
7133     unsigned int width = 1;
7134
7135     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7136             numberOfBatches, numberOfChannels, height, width, layout);
7137
7138     // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
7139     std::vector<float> inputValues
7140     {
7141         // Batch 0, Channel 0, Height (1) x Width (1)
7142         0.00000001f,
7143
7144         // Batch 0, Channel 1, Height (1) x Width (1)
7145         0.00000002f,
7146
7147         // Batch 0, Channel 2, Height (1) x Width (1)
7148         0.00000003f,
7149     };
7150
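         // The sum of squares (1e-16 + 4e-16 + 9e-16 = 1.4e-15) is below epsilon,
         // so the normalization denominator is clamped to sqrt(epsilon) and every
         // element is simply scaled by 1 / sqrt(epsilon).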
7151     const float approxInvL2Norm = 1.f / sqrtf(epsilon);
7152     std::vector<float> expectedOutputValues
7153     {
7154         // Batch 0, Channel 0, Height (1) x Width (1)
7155         0.00000001f * approxInvL2Norm,
7156         0.00000002f * approxInvL2Norm,
7157         0.00000003f * approxInvL2Norm,
7158     };
7159
7160     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7161                                               inputValues, outScale, outOffset, expectedOutputValues, layout,
7162                                               epsilon);
7163 }
7164
7165
7166 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7167 LayerTestResult<T, 4> L2Normalization1dTestCommon(
7168         armnn::IWorkloadFactory& workloadFactory,
7169         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7170         float scale,
7171         int32_t offset,
7172         float outScale,
7173         int32_t outOffset,
7174         const armnn::DataLayout layout)
7175 {
7176     // Width: 1
7177     // Height: 1
7178     // Channels: 10
7179     // BatchSize: 1
7180     unsigned int numberOfBatches = 1;
7181     unsigned int numberOfChannels = 10;
7182     unsigned int height = 1;
7183     unsigned int width = 1;
7184
7185
7186     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7187             numberOfBatches, numberOfChannels, height, width, layout);
7188     std::vector<float> inputValues
7189     {
7190         // Batch 0, Channel 0, Height (1) x Width (1)
7191         1.0f,
7192
7193         // Batch 0, Channel 1, Height (1) x Width (1)
7194         2.0f,
7195
7196         // Batch 0, Channel 2, Height (1) x Width (1)
7197         3.0f,
7198
7199         // Batch 0, Channel 3, Height (1) x Width (1)
7200         4.0f,
7201
7202         // Batch 0, Channel 4, Height (1) x Width (1)
7203         5.0f,
7204
7205         // Batch 0, Channel 5, Height (1) x Width (1)
7206         6.0f,
7207
7208         // Batch 0, Channel 6, Height (1) x Width (1)
7209         7.0f,
7210
7211         // Batch 0, Channel 7, Height (1) x Width (1)
7212         8.0f,
7213
7214         // Batch 0, Channel 8, Height (1) x Width (1)
7215         9.0f,
7216
7217         // Batch 0, Channel 9, Height (1) x Width (1)
7218         10.0f
7219     };
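         // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719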
7220     const float approxInvL2Norm = 0.050964719f;
7221     std::vector<float> expectedOutputValues
7222     {
7223         // Batch 0, Channel 0, Height (1) x Width (1)
7224         1.0f * approxInvL2Norm,
7225         2.0f * approxInvL2Norm,
7226         3.0f * approxInvL2Norm,
7227         4.0f * approxInvL2Norm,
7228         5.0f * approxInvL2Norm,
7229         6.0f * approxInvL2Norm,
7230         7.0f * approxInvL2Norm,
7231         8.0f * approxInvL2Norm,
7232         9.0f * approxInvL2Norm,
7233         10.0f * approxInvL2Norm
7234     };
7235
7236
7237     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7238                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7239 }
7240
7241 LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
7242         armnn::IWorkloadFactory& workloadFactory,
7243         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7244         const armnn::DataLayout layout)
7245 {
7246     // Dummy descriptor to get the default value of epsilon.
7247     armnn::L2NormalizationDescriptor descriptor;
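         // A default-constructed descriptor holds the default epsilon (1e-12f),
         // which is forwarded below so that the default code path is exercised.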
7248
7249     return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7250                                                                       layout, descriptor.m_Eps);
7251 }
7252
7253 LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
7254         armnn::IWorkloadFactory& workloadFactory,
7255         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7256         const armnn::DataLayout layout)
7257 {
7258     return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7259                                                                       layout, 1e-9f);
7260 }
7261
7262 LayerTestResult<float, 4> L2Normalization1dTest(
7263     armnn::IWorkloadFactory& workloadFactory,
7264     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7265     const armnn::DataLayout layout)
7266 {
7267     return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, layout);
7268 }
7269
7270 LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
7271     armnn::IWorkloadFactory& workloadFactory,
7272     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7273     const armnn::DataLayout layout)
7274 {
7275     return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7276                                                                          layout);
7277 }
7278
7279 LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
7280     armnn::IWorkloadFactory& workloadFactory,
7281     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7282     const armnn::DataLayout layout)
7283 {
7284     return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7285                                                                          1.f/128, 128, layout);
7286 }
7287
7288 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7289 LayerTestResult<T, 4> L2Normalization2dTestCommon(
7290     armnn::IWorkloadFactory& workloadFactory,
7291     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7292     float scale,
7293     int32_t offset,
7294     float outScale,
7295     int32_t outOffset,
7296     const armnn::DataLayout layout)
7297 {
7298     // Width: 5
7299     // Height: 1
7300     // Channels: 2
7301     // BatchSize: 1
7302     unsigned int numberOfBatches = 1;
7303     unsigned int numberOfChannels = 2;
7304     unsigned int height = 1;
7305     unsigned int width = 5;
7306
7307     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7308             numberOfBatches, numberOfChannels, height, width, layout);
7309     std::vector<float> inputValues
7310     {
7311         // Batch 0, Channel 0, Height (1) x Width (5)
7312         1.0f, 3.0f, 5.0f, 7.0f,  9.0f,
7313
7314         // Batch 0, Channel 1, Height (1) x Width (5)
7315         2.0f, 4.0f, 6.0f, 8.0f, 10.0f
7316     };
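         // Normalization is across the channel dimension: each value is scaled by
         // 1 / sqrt(c0^2 + c1^2) of the two channel values sharing its position,
         // e.g. 1.0f and 2.0f are both divided by sqrt(1 + 4) = sqrt(5).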
7317     std::vector<float> expectedOutputValues
7318     {
7319         // Batch 0, Channel 0, Height (1) x Width (5)
7320         1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7321         3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7322         5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7323         7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7324         9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
7325
7326         // Batch 0, Channel 1, Height (1) x Width (5)
7327         2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7328         4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7329         6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7330         8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7331         10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
7332     };
7333
7334     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7335                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7336 }
7337
7338 LayerTestResult<float, 4> L2Normalization2dTest(
7339     armnn::IWorkloadFactory& workloadFactory,
7340     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7341     const armnn::DataLayout layout)
7342 {
7343     return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7344                                                                  layout);
7345 }
7346
7347 LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
7348     armnn::IWorkloadFactory& workloadFactory,
7349     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7350     const armnn::DataLayout layout)
7351 {
7352     return L2Normalization2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7353                                                                          layout);
7354 }
7355
7356 LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
7357     armnn::IWorkloadFactory& workloadFactory,
7358     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7359     const armnn::DataLayout layout)
7360 {
7361     return L2Normalization2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7362                                                                          1.f/128, 128, layout);
7363 }
7364
7365 LayerTestResult<float, 2> L2Normalization2dShapeTest(
7366     armnn::IWorkloadFactory& workloadFactory,
7367     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7368 {
7369     const armnn::DataLayout layout = armnn::DataLayout::NHWC;
7370     const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });
7371
7372     std::vector<float> inputData
7373     {
7374         1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
7375     };
7376     std::vector<float> expectedOutputData
7377     {
7378         1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7379         2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
7380         3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7381         4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
7382         5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7383         6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
7384         7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7385         8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
7386         9.0f  * CalcInvL2Norm({ 9.0f, 10.0f }),
7387         10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
7388     };
7389
7390     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
7391     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
7392
7393     auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
7394                                                              inputTensorInfo.GetQuantizationScale(),
7395                                                              inputTensorInfo.GetQuantizationOffset(),
7396                                                              inputData));
7397
7398     LayerTestResult<float, 2> result(outputTensorInfo);
7399     result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
7400                                                                    outputTensorInfo.GetQuantizationScale(),
7401                                                                    outputTensorInfo.GetQuantizationOffset(),
7402                                                                    expectedOutputData));
7403
7404     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7405     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7406
7407     armnn::L2NormalizationQueueDescriptor descriptor;
7408     descriptor.m_Parameters.m_Eps = 1e-12f;
7409     descriptor.m_Parameters.m_DataLayout = layout;
7410     armnn::WorkloadInfo info;
7411
7412     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7413     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7414
7415     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
7416
7417     inputHandle->Allocate();
7418     outputHandle->Allocate();
7419
7420     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
7421
7422     workload->PostAllocationConfigure();
7423     ExecuteWorkload(*workload, memoryManager);
7424
7425     CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
7426
7427     return result;
7428 }
7429
7430 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7431 LayerTestResult<T, 4> L2Normalization3dTestCommon(
7432     armnn::IWorkloadFactory& workloadFactory,
7433     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7434     float scale,
7435     int32_t offset,
7436     float outScale,
7437     int32_t outOffset,
7438     const armnn::DataLayout layout)
7439 {
7440     // Width: 3
7441     // Height: 4
7442     // Channels: 2
7443     // BatchSize: 1
7444     unsigned int numberOfBatches = 1;
7445     unsigned int numberOfChannels = 2;
7446     unsigned int height = 4;
7447     unsigned int width = 3;
7448
7449     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7450             numberOfBatches, numberOfChannels, height, width, layout);
7451     std::vector<float> inputValues
7452     {
7453         // Batch 0, Channel 0, Height (4) x Width (3)
7454         119.0f,  21.0f, 150.0f,
7455         149.0f,  32.0f, 179.0f,
7456         15.0f, 227.0f, 141.0f,
7457         147.0f, 199.0f, 220.0f,
7458
7459         // Batch 0, Channel 1, Height (4) x Width (3)
7460         110.0f, 140.0f,  73.0f,
7461         211.0f, 212.0f,  89.0f,
7462         24.0f, 138.0f, 188.0f,
7463         162.0f,  12.0f, 161.0f
7464     };
7465     std::vector<float> expectedOutputValues
7466     {
7467         // Batch 0, Channel 0, Height (4) x Width (3)
7468         119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
7469         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
7470         150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
7471         149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
7472         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
7473         179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
7474         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
7475         227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
7476         141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
7477         147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
7478         199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
7479         220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
7480
7481         // Batch 0, Channel 1, Height (4) x Width (3)
7482         110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
7483         140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
7484         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
7485         211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
7486         212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
7487         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
7488         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
7489         138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
7490         188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
7491         162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
7492         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
7493         161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
7494     };
7495
7496     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7497                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7498 }
7499
7500 LayerTestResult<float, 4> L2Normalization3dTest(
7501     armnn::IWorkloadFactory& workloadFactory,
7502     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7503     const armnn::DataLayout layout)
7504 {
7505     return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7506                                                                  layout);
7507 }
7508
7509 LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
7510     armnn::IWorkloadFactory& workloadFactory,
7511     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7512     const armnn::DataLayout layout)
7513 {
7514     return L2Normalization3dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7515                                                                          layout);
7516 }
7517
7518 LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
7519     armnn::IWorkloadFactory& workloadFactory,
7520     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7521     const armnn::DataLayout layout)
7522 {
7523     return L2Normalization3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7524                                                                          1.f/128, 128, layout);
7525 }
7526
7527 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7528 LayerTestResult<T, 4> L2Normalization4dTestCommon(
7529     armnn::IWorkloadFactory& workloadFactory,
7530     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7531     float scale,
7532     int32_t offset,
7533     float outScale,
7534     int32_t outOffset,
7535     const armnn::DataLayout layout)
7536 {
7537     // Width: 3
7538     // Height: 4
7539     // Channels: 3
7540     // BatchSize: 2
7541     unsigned int numberOfBatches = 2;
7542     unsigned int numberOfChannels = 3;
7543     unsigned int height = 4;
7544     unsigned int width = 3;
7545
7546     const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7547             numberOfBatches, numberOfChannels, height, width, layout);
7548     std::vector<float> inputValues
7549     {
7550         // Batch 0, Channel 0, Height (4) x Width (3)
7551         235.0f,  46.0f, 178.0f,
7552         100.0f, 123.0f,  19.0f,
7553         172.0f,  74.0f, 250.0f,
7554         6.0f, 195.0f,  80.0f,
7555
7556         // Batch 0, Channel 1, Height (4) x Width (3)
7557         113.0f,  95.0f, 202.0f,
7558         77.0f, 114.0f,  71.0f,
7559         122.0f, 246.0f, 166.0f,
7560         82.0f,  28.0f,  37.0f,
7561
7562         // Batch 0, Channel 2, Height (4) x Width (3)
7563         56.0f, 170.0f, 162.0f,
7564         194.0f,  89.0f, 254.0f,
7565         12.0f, 209.0f, 200.0f,
7566         1.0f,  64.0f,  54.0f,
7567
7568         // Batch 1, Channel 0, Height (4) x Width (3)
7569         67.0f,  90.0f,  49.0f,
7570         7.0f, 163.0f,  18.0f,
7571         25.0f, 117.0f, 103.0f,
7572         247.0f,  59.0f, 189.0f,
7573
7574         // Batch 1, Channel 1, Height (4) x Width (3)
7575         239.0f, 104.0f, 199.0f,
7576         17.0f, 124.0f, 153.0f,
7577         222.0f, 217.0f, 75.0f,
7578         32.0f, 126.0f, 21.0f,
7579
7580         // Batch 1, Channel 2, Height (4) x Width (3)
7581         97.0f, 145.0f, 215.0f,
7582         115.0f, 116.0f, 238.0f,
7583         226.0f,  16.0f, 132.0f,
7584         92.0f, 125.0f,  88.0f
7585     };
7586     std::vector<float> expectedOutputValues
7587     {
7588         // Batch 0, Channel 0, Height (4) x Width (3)
7589         235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
7590         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
7591         178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
7592         100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
7593         123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
7594         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
7595         172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
7596         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
7597         250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
7598         6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
7599         195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
7600         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
7601
7602         // Batch 0, Channel 1, Height (4) x Width (3)
7603         113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
7604         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
7605         202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
7606         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
7607         114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
7608         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
7609         122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
7610         246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
7611         166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
7612         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
7613         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
7614         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
7615
7616         // Batch 0, Channel 2, Height (4) x Width (3)
7617         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
7618         170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
7619         162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
7620         194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
7621         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
7622         254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
7623         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
7624         209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
7625         200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
7626         1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
7627         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
7628         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
7629
7630         // Batch 1, Channel 0, Height (4) x Width (3)
7631         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
7632         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
7633         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
7634         7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
7635         163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7636         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
7637         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
7638         117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
7639         103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
7640         247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
7641         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
7642         189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
7643
7644         // Batch 1, Channel 1, Height (4) x Width (3)
7645         239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
7646         104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
7647         199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
7648         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
7649         124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7650         153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
7651         222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
7652         217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
7653         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
7654         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
7655         126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
7656         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
7657
7658         // Batch 1, Channel 2, Height (4) x Width (3)
7659         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
7660         145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
7661         215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
7662         115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
7663         116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7664         238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
7665         226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
7666         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
7667         132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
7668         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
7669         125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
7670         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
7671     };
7672
7673     return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7674                                               inputValues, outScale, outOffset, expectedOutputValues, layout);
7675 }
7676
7677 LayerTestResult<float, 4> L2Normalization4dTest(
7678     armnn::IWorkloadFactory& workloadFactory,
7679     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7680     const armnn::DataLayout layout)
7681 {
7682     return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7683                                                                  layout);
7684 }
7685
7686 LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7687     armnn::IWorkloadFactory& workloadFactory,
7688     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7689     const armnn::DataLayout layout)
7690 {
7691     return L2Normalization4dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
7692                                                                          layout);
7693 }
7694
7695 LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7696     armnn::IWorkloadFactory& workloadFactory,
7697     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7698     const armnn::DataLayout layout)
7699 {
7700     return L2Normalization4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7701                                                                          1.f/128, 128, layout);
7702 }
7703
7704 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7705 LayerTestResult<T, 4> ConstantTestImpl(
7706     armnn::IWorkloadFactory& workloadFactory,
7707     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7708     float qScale,
7709     int32_t qOffset)
7710 {
7711     constexpr unsigned int inputWidth = 3;
7712     constexpr unsigned int inputHeight = 4;
7713     constexpr unsigned int inputChannels = 3;
7714     constexpr unsigned int inputBatchSize = 2;
7715
7716     constexpr unsigned int outputWidth = inputWidth;
7717     constexpr unsigned int outputHeight = inputHeight;
7718     constexpr unsigned int outputChannels = inputChannels;
7719     constexpr unsigned int outputBatchSize = inputBatchSize;
7720
7721     armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7722                                         ArmnnType, qScale, qOffset);
7723
7724     armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7725                                          ArmnnType, qScale, qOffset);
7726
7727     // Set quantization parameters if the requested type is a quantized type.
7728     if(armnn::IsQuantizedType<T>())
7729     {
7730         inputTensorInfo.SetQuantizationScale(qScale);
7731         inputTensorInfo.SetQuantizationOffset(qOffset);
7732         outputTensorInfo.SetQuantizationScale(qScale);
7733         outputTensorInfo.SetQuantizationOffset(qOffset);
7734     }
7735
7736     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
7737         QuantizedVector<T>(qScale, qOffset, {
7738         // Batch 0, Channel 0
7739         235.0f,  46.0f, 178.0f,
7740         100.0f, 123.0f,  19.0f,
7741         172.0f,  74.0f, 250.0f,
7742           6.0f, 195.0f,  80.0f,
7743
7744         // Batch 0, Channel 1
7745         113.0f,  95.0f, 202.0f,
7746          77.0f, 114.0f,  71.0f,
7747         122.0f, 246.0f, 166.0f,
7748          82.0f,  28.0f,  37.0f,
7749
7750         // Batch 0, Channel 2
7751          56.0f, 170.0f, 162.0f,
7752         194.0f,  89.0f, 254.0f,
7753          12.0f, 209.0f, 200.0f,
7754           1.0f,  64.0f,  54.0f,
7755
7756         // Batch 1, Channel 0
7757          67.0f,  90.0f,  49.0f,
7758           7.0f, 163.0f,  18.0f,
7759          25.0f, 117.0f, 103.0f,
7760         247.0f,  59.0f, 189.0f,
7761
7762         // Batch 1, Channel 1
7763         239.0f, 104.0f, 199.0f,
7764          17.0f, 124.0f, 153.0f,
7765         222.0f, 217.0f, 75.0f,
7766          32.0f, 126.0f, 21.0f,
7767
7768         // Batch 1, Channel 2
7769          97.0f, 145.0f, 215.0f,
7770         115.0f, 116.0f, 238.0f,
7771         226.0f,  16.0f, 132.0f,
7772          92.0f, 125.0f,  88.0f,
7773     })));
7774
7775     LayerTestResult<T, 4> result(outputTensorInfo);
7776     result.outputExpected = input;
7777
7778     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7779
7780     armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
7781     AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
7782
7783     armnn::ConstantQueueDescriptor descriptor;
7784     descriptor.m_LayerOutput = &constantTensor;
7785
7786     armnn::WorkloadInfo info;
7787     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7788
7789     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
7790
7791     outputHandle->Allocate();
7792
7793     workload->PostAllocationConfigure();
7794     workload->Execute();
7795
7796     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7797     return result;
7798 }
7799
7800 LayerTestResult<float, 4> ConstantTest(
7801     armnn::IWorkloadFactory& workloadFactory,
7802     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7803 {
7804     return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
7805 }
7806
7807 LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7808     armnn::IWorkloadFactory& workloadFactory,
7809     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7810 {
7811     return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7812 }
7813
7814 LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
7815     armnn::IWorkloadFactory& workloadFactory,
7816     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7817 {
7818     return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
7819 }
7820
7821 LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
7822         armnn::IWorkloadFactory& workloadFactory,
7823         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7824 {
7825     unsigned int outputWidth = 3;
7826     unsigned int outputHeight = 6;
7827     unsigned int outputChannels = 3;
7828
7829     unsigned int inputWidth1 = 3;
7830     unsigned int inputHeight1 = 6;
7831     unsigned int inputChannels1 = 2;
7832
7833     unsigned int inputWidth2 = 3;
7834     unsigned int inputHeight2 = 6;
7835     unsigned int inputChannels2 = 1;
7836
7837     // Defines the tensor descriptors.
7838     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7839     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7840     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7841
7842     // Quantized input1 tensor. Range [-3, 1]
7843     const float inputScale1 = 0.015686f;
7844     const int32_t inputOffset1 = 192;
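         // Representable range: [(0 - 192) * 0.015686, (255 - 192) * 0.015686] ~= [-3.01, 0.99].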
7845
7846     auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7847     {
7848         1, 2, 3,
7849         4, 5, 6,
7850         7, 8, 9,
7851         10, 11, 12,
7852         13, 14, 15,
7853         16, 17, 18,
7854
7855         19, 20, 21,
7856         22, 23, 24,
7857         25, 26, 27,
7858         28, 29, 30,
7859         31, 32, 33,
7860         34, 35, 36,
7861     })
7862     );
7863
7864     // Quantized input2 tensor. Range [-1, 4]
7865     const float inputScale2 = 0.019608f;
7866     const int32_t inputOffset2 = 50;
7867
7868     auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7869     {
7870         37, 38, 39,
7871         40, 41, 42,
7872         43, 44, 45,
7873         46, 47, 48,
7874         49, 50, 51,
7875         52, 53, 54,
7876     })
7877     );
7878
7879     // Output has the same quantization parameters as input1,
7880     // so that only the requantization of input2 is required.
7881     const float outputScale = 0.015686f;
7882     const int32_t outputOffset = 192;
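         // Requantization maps input2 into the output space:
         // qOut = round((qIn - 50) * 0.019608 / 0.015686) + 192,
         // e.g. qIn = 37 gives ~175.75 -> 176, the first value of the third
         // channel in the expected output below.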
7883
7884     LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7885
7886     ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7887     {
7888         1, 2, 3,
7889         4, 5, 6,
7890         7, 8, 9,
7891         10, 11, 12,
7892         13, 14, 15,
7893         16, 17, 18,
7894
7895         19, 20, 21,
7896         22, 23, 24,
7897         25, 26, 27,
7898         28, 29, 30,
7899         31, 32, 33,
7900         34, 35, 36,
7901
7902         176, 177, 178,
7903         179, 181, 182,
7904         183, 184, 186,
7905         187, 188, 189,
7906         191, 192, 193,
7907         195, 196, 197,
7908     })
7909     );
7910
7911     outputTensorInfo.SetQuantizationScale(outputScale);
7912     outputTensorInfo.SetQuantizationOffset(outputOffset);
7913     inputTensorInfo1.SetQuantizationScale(inputScale1);
7914     inputTensorInfo1.SetQuantizationOffset(inputOffset1);
7915     inputTensorInfo2.SetQuantizationScale(inputScale2);
7916     inputTensorInfo2.SetQuantizationOffset(inputOffset2);
7917
7918     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
7919     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
7920
7921     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
7922     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
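         // input1 fills channels [0, 2) of the output; wOrigin2 = { 2, 0, 0 } places
         // input2 immediately after it, starting at channel 2.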
7923
7924     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7925
7926     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7927
7928     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7929             subTensorsSupported ?
7930             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7931             workloadFactory.CreateTensorHandle(inputTensorInfo1);
7932
7933     std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7934             subTensorsSupported ?
7935             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7936             workloadFactory.CreateTensorHandle(inputTensorInfo2);
7937
7938     armnn::ConcatQueueDescriptor data;
7939     armnn::WorkloadInfo info;
7940     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7941     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7942     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7943
7944     data.m_ViewOrigins.push_back(window1);
7945     data.m_ViewOrigins.push_back(window2);
7946
7947     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
7948
7949     inputHandle1->Allocate();
7950     inputHandle2->Allocate();
7951     outputHandle->Allocate();
7952
7953     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7954     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7955
7956     workload->PostAllocationConfigure();
7957     workload->Execute();
7958
7959     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7960
7961     return ret;
7962 }
7963
7964 LayerTestResult<uint8_t, 3> ConcatUint8Test(
7965     armnn::IWorkloadFactory& workloadFactory,
7966     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7967 {
7968     unsigned int outputWidth = 3;
7969     unsigned int outputHeight = 6;
7970     unsigned int outputChannels = 3;
7971
7972     unsigned int inputWidth1 = 3;
7973     unsigned int inputHeight1 = 6;
7974     unsigned int inputChannels1 = 2;
7975
7976     unsigned int inputWidth2 = 3;
7977     unsigned int inputHeight2 = 6;
7978     unsigned int inputChannels2 = 1;
7979
7980     // Defines the tensor descriptors.
7981     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7982     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7983     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7984
7985     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
7986     const float scale = 0.13497836f;
7987     const int32_t offset = -7;
7988
7989     outputTensorInfo.SetQuantizationScale(scale);
7990     outputTensorInfo.SetQuantizationOffset(offset);
7991     inputTensorInfo1.SetQuantizationScale(scale);
7992     inputTensorInfo1.SetQuantizationOffset(offset);
7993     inputTensorInfo2.SetQuantizationScale(scale);
7994     inputTensorInfo2.SetQuantizationOffset(offset);
7995
7996     LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7997
7998     ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7999         {
8000             1, 2, 3,
8001             4, 5, 6,
8002             7, 8, 9,
8003             10, 11, 12,
8004             13, 14, 15,
8005             16, 17, 18,
8006
8007             19, 20, 21,
8008             22, 23, 24,
8009             25, 26, 27,
8010             28, 29, 30,
8011             31, 32, 33,
8012             34, 35, 36,
8013
8014             37, 38, 39,
8015             40, 41, 42,
8016             43, 44, 45,
8017             46, 47, 48,
8018             49, 50, 51,
8019             52, 53, 54,
8020         })
8021     );
8022
8023     auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
8024     {
8025         1, 2, 3,
8026         4, 5, 6,
8027         7, 8, 9,
8028         10, 11, 12,
8029         13, 14, 15,
8030         16, 17, 18,
8031
8032         19, 20, 21,
8033         22, 23, 24,
8034         25, 26, 27,
8035         28, 29, 30,
8036         31, 32, 33,
8037         34, 35, 36,
8038     })
8039     );
8040
8041     auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
8042     {
8043         37, 38, 39,
8044         40, 41, 42,
8045         43, 44, 45,
8046         46, 47, 48,
8047         49, 50, 51,
8048         52, 53, 54,
8049     })
8050     );
8051
8052     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
8053     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
8054
8055     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
8056     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
8057
8058
8059     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8060
8061     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
8062
8063     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
8064         subTensorsSupported ?
8065             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
8066             workloadFactory.CreateTensorHandle(inputTensorInfo1);
8067
8068     std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
8069         subTensorsSupported ?
8070             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
8071             workloadFactory.CreateTensorHandle(inputTensorInfo2);
8072
8073
8074     armnn::ConcatQueueDescriptor data;
8075     armnn::WorkloadInfo info;
8076     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8077     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
8078     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8079
8080     data.m_ViewOrigins.push_back(window1);
8081     data.m_ViewOrigins.push_back(window2);
8082
8083     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
8084
8085     inputHandle1->Allocate();
8086     inputHandle2->Allocate();
8087     outputHandle->Allocate();
8088
8089     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
8090     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
8091
8092     workload->PostAllocationConfigure();
8093     workload->Execute();
8094
8095     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
8096
8097     return ret;
8098 }
8099
8100 LayerTestResult<uint16_t, 3> ConcatUint16Test(
8101         armnn::IWorkloadFactory& workloadFactory,
8102         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8103 {
8104     unsigned int outputWidth = 3;
8105     unsigned int outputHeight = 6;
8106     unsigned int outputChannels = 3;
8107
8108     unsigned int inputWidth1 = 3;
8109     unsigned int inputHeight1 = 6;
8110     unsigned int inputChannels1 = 2;
8111
8112     unsigned int inputWidth2 = 3;
8113     unsigned int inputHeight2 = 6;
8114     unsigned int inputChannels2 = 1;
8115
8116     // Defines the tensor descriptors.
8117     armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
8118     armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
8119     armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
8120
8121     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
8122     const float scale = 0.13497836f;
8123     const int32_t offset = -7;
8124
8125     outputTensorInfo.SetQuantizationScale(scale);
8126     outputTensorInfo.SetQuantizationOffset(offset);
8127     inputTensorInfo1.SetQuantizationScale(scale);
8128     inputTensorInfo1.SetQuantizationOffset(offset);
8129     inputTensorInfo2.SetQuantizationScale(scale);
8130     inputTensorInfo2.SetQuantizationOffset(offset);
8131
8132     LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
8133
8134     ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
8135     {
8136         1, 2, 3,
8137         4, 5, 6,
8138         7, 8, 9,
8139         10, 11, 12,
8140         13, 14, 15,
8141         16, 17, 18,
8142
8143         19, 20, 21,
8144         22, 23, 24,
8145         25, 26, 27,
8146         28, 29, 30,
8147         31, 32, 33,
8148         34, 35, 36,
8149
8150         37, 38, 39,
8151         40, 41, 42,
8152         43, 44, 45,
8153         46, 47, 48,
8154         49, 50, 51,
8155         52, 53, 54,
8156     }));
8157
8158     auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
8159     {
8160         1, 2, 3,
8161         4, 5, 6,
8162         7, 8, 9,
8163         10, 11, 12,
8164         13, 14, 15,
8165         16, 17, 18,
8166
8167         19, 20, 21,
8168         22, 23, 24,
8169         25, 26, 27,
8170         28, 29, 30,
8171         31, 32, 33,
8172         34, 35, 36,
8173     }));
8174
8175     auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
8176     {
8177         37, 38, 39,
8178         40, 41, 42,
8179         43, 44, 45,
8180         46, 47, 48,
8181         49, 50, 51,
8182         52, 53, 54,
8183     }));
8184
8185     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
8186     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
8187
8188     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
8189     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
8190
8191
8192     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8193
8194     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
8195
8196     std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
8197             subTensorsSupported ?
8198             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
8199             workloadFactory.CreateTensorHandle(inputTensorInfo1);
8200
8201     std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
8202             subTensorsSupported ?
8203             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
8204             workloadFactory.CreateTensorHandle(inputTensorInfo2);
8205
8206
8207     armnn::ConcatQueueDescriptor data;
8208     armnn::WorkloadInfo info;
8209     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8210     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
8211     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8212
8213     data.m_ViewOrigins.push_back(window1);
8214     data.m_ViewOrigins.push_back(window2);
8215
8216     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
8217
8218     inputHandle1->Allocate();
8219     inputHandle2->Allocate();
8220     outputHandle->Allocate();
8221
8222     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
8223     CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
8224
8225     workload->PostAllocationConfigure();
8226     workload->Execute();
8227
8228     CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
8229
8230     return ret;
8231 }
8232
8233 namespace
8234 {
8235 template <typename T>
8236 LayerTestResult<T, 4> AdditionQuantizeTestHelper(
8237     armnn::IWorkloadFactory& workloadFactory,
8238     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8239     const unsigned int shape0[4],
8240     const std::vector<T>& values0,
8241     float scale0,
8242     int32_t offset0,
8243     const unsigned int shape1[4],
8244     const std::vector<T> & values1,
8245     float scale1,
8246     int32_t offset1,
8247     const unsigned int outShape[4],
8248     const std::vector<T> & outValues,
8249     float outScale,
8250     int32_t outOffset)
8251 {
8252     auto dataType = (std::is_same<T, uint8_t>::value ?
8253                      armnn::DataType::QuantisedAsymm8 :
8254                      armnn::DataType::QuantisedSymm16);
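         // uint8_t selects QuantisedAsymm8; any other T (int16_t in these tests)
         // selects QuantisedSymm16.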
8255
8256     armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
8257     armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
8258     armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
8259
8260     inputTensorInfo0.SetQuantizationScale(scale0);
8261     inputTensorInfo0.SetQuantizationOffset(offset0);
8262
8263     inputTensorInfo1.SetQuantizationScale(scale1);
8264     inputTensorInfo1.SetQuantizationOffset(offset1);
8265
8266     outputTensorInfo.SetQuantizationScale(outScale);
8267     outputTensorInfo.SetQuantizationOffset(outOffset);
8268
8269     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8270     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8271
8272     LayerTestResult<T, 4> result(outputTensorInfo);
8273     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8274
8275     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8276     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8277     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8278
8279     armnn::AdditionQueueDescriptor data;
8280     armnn::WorkloadInfo info;
8281     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
8282     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
8283     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8284
8285     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
8286
8287     inputHandle0->Allocate();
8288     inputHandle1->Allocate();
8289     outputHandle->Allocate();
8290
8291     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8292     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8293
8294     workload->PostAllocationConfigure();
8295     workload->Execute();
8296
8297     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8298
8299     return result;
8300 }
8301 } // anonymous namespace
8302
8303 LayerTestResult<uint8_t, 4> AdditionUint8Test(
8304     armnn::IWorkloadFactory& workloadFactory,
8305     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8306 {
8307     const unsigned int shape0[] = { 1, 2, 2, 3 };
8308     const unsigned int shape1[] = { 1, 2, 2, 3 };
8309
8310     std::vector<uint8_t> input0(
8311     {
8312         63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
8313         203,  28, 252, 168, 245,  91  // 1400, 175, 1743, 1155, 1694, 616
8314     });
8315
8316     std::vector<uint8_t> input1(
8317     {
8318         21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
8319         126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
8320     });
8321
8322     std::vector<uint8_t> output(
8323     {
8324         81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
8325         255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
8326     });
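         // The comments above show the dequantized values, real = 7.0f * (q - 3).
         // Each sum is requantized as q = round(sum / 7.0f) + 3 and clamped to the
         // uint8 range [0, 255].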
8327
8328     return AdditionQuantizeTestHelper(workloadFactory,
8329                                       memoryManager,
8330                                       shape0, input0, 7.0f, 3,
8331                                       shape1, input1, 7.0f, 3,
8332                                       shape0, output, 7.0f, 3);
8333 }
8334
8335 LayerTestResult<int16_t, 4> AdditionInt16Test(
8336     armnn::IWorkloadFactory& workloadFactory,
8337     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8338 {
8339     const unsigned int shape0[] = { 1, 2, 2, 3 };
8340     const unsigned int shape1[] = { 1, 2, 2, 3 };
8341
8342     std::vector<int16_t> input0(
8343         {
8344             63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392, 784
8345             203,  28, 252, 168, 245,  91  // 1421, 196, 1764, 1176, 1715, 637
8346         });
8347
8348     std::vector<int16_t> input1(
8349         {
8350             21,   7, 175, 231, 175, 210, //  147,   49, 1225, 1617, 1225, 1470
8351             126, 161,  63,  21, 105, 126  //  882, 1127,  441,  147,  735,  882
8352         });
8353
8354     std::vector<int16_t> output(
8355         {
8356             84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
8357             329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
8358         });
8359
8360     return AdditionQuantizeTestHelper(workloadFactory,
8361                                       memoryManager,
8362                                       shape0, input0, 7.0f, 0,
8363                                       shape1, input1, 7.0f, 0,
8364                                       shape0, output, 7.0f, 0);
8365 }
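
// Note: QuantisedSymm16 is a symmetric quantization scheme, so the offset is always
// 0 and the representable range is that of int16_t. None of the sums above comes
// close to saturating, so unlike the Uint8 variant no expected value is clamped:
// e.g. 63 + 21 == 84, matching 441 + 147 == 588 == 84 * 7 in dequantized terms.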
8366
8367 namespace
8368 {
8369 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
8370 LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
8371     armnn::IWorkloadFactory& workloadFactory,
8372     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8373     const unsigned int shape0[4],
8374     const std::vector<T>& values0,
8375     float scale0,
8376     int32_t offset0,
8377     const unsigned int shape1[4],
8378     const std::vector<T>& values1,
8379     float scale1,
8380     int32_t offset1,
8381     const unsigned int outShape[4],
8382     const std::vector<T>& outValues,
8383     float outScale,
8384     int32_t outOffset)
8385 {
8386     armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
8387     armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
8388     armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
8389
8390     inputTensorInfo0.SetQuantizationScale(scale0);
8391     inputTensorInfo0.SetQuantizationOffset(offset0);
8392
8393     inputTensorInfo1.SetQuantizationScale(scale1);
8394     inputTensorInfo1.SetQuantizationOffset(offset1);
8395
8396     outputTensorInfo.SetQuantizationScale(outScale);
8397     outputTensorInfo.SetQuantizationOffset(outOffset);
8398
8399     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8400     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8401
8402     LayerTestResult<T, 4> result(outputTensorInfo);
8403     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8404
8405     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8406     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8407     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8408
8409     armnn::MultiplicationQueueDescriptor data;
8410     armnn::WorkloadInfo info;
8411     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
8412     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
8413     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8414
8415     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
8416
8417     inputHandle0->Allocate();
8418     inputHandle1->Allocate();
8419     outputHandle->Allocate();
8420
8421     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8422     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8423
8424     workload->PostAllocationConfigure();
8425     workload->Execute();
8426
8427     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8428
8429     return result;
8430 }
8431 } // anonymous namespace
8432
8433 LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
8434     armnn::IWorkloadFactory& workloadFactory,
8435     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8436 {
8437     unsigned int batchSize = 1;
8438     unsigned int channels = 2;
8439     unsigned int height = 2;
8440     unsigned int width = 3;
8441     const unsigned int shape[] = { batchSize, channels, height, width };
8442
8443     // See dequantized values to the right.
8444     std::vector<uint8_t> input0({
8445          62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
8446         188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
8447     });
8448
8449     // See dequantized values to the right.
8450     std::vector<uint8_t> input1({
8451         126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
8452          48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
8453     });
8454
8455     // See dequantized values to the right.
8456     std::vector<uint8_t> output(
8457     {
8458          64,  72,   0, 255,   8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
8459          77,  15,  92,  16,  10,  21, // 112200,  26676,        132192,           29160, 21120,  35640
8460     });
8461
8462     // Scale/offset chosen to have output values out of range.
8463     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8464                                                                               memoryManager,
8465                                                                               shape,
8466                                                                               input0,
8467                                                                               4.0f,
8468                                                                               1,
8469                                                                               shape,
8470                                                                               input1,
8471                                                                               3.0f,
8472                                                                               -2,
8473                                                                               shape,
8474                                                                               output,
8475                                                                               1366.255f,
8476                                                                               -5);
8477 }
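
// Worked example for the clamped entries above: the fourth product is
// 684 * 555 == 379620 in dequantized terms, and re-quantizing gives
// round(379620 / 1366.255) - 5 == 278 - 5 == 273, which lies outside [0, 255]
// and is clamped to 255. The third product quantizes to
// round(6096 / 1366.255) - 5 == -1 and is clamped to 0 at the other end.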
8478
8479 LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
8480     armnn::IWorkloadFactory& workloadFactory,
8481     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8482 {
8483     const unsigned int shape0[] = { 1, 2, 2, 3 };
8484     const unsigned int shape1[] = { 1, 1, 1, 1 };
8485
8486     std::vector<uint8_t> input0({
8487         1, 2, 3,    4,  5,  6,
8488         7, 8, 9,   10, 11, 12
8489     });
8490
8491     std::vector<uint8_t> input1({2});
8492
8493     std::vector<uint8_t> output({
8494         2,  4,   6,     8, 10, 12,
8495         14, 16, 18,    20, 22, 24
8496     });
8497
8498     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8499                                                                               memoryManager,
8500                                                                               shape0,
8501                                                                               input0,
8502                                                                               1.0f,
8503                                                                               0,
8504                                                                               shape1,
8505                                                                               input1,
8506                                                                               1.0f,
8507                                                                               0,
8508                                                                               shape0,
8509                                                                               output,
8510                                                                               1.0f,
8511                                                                               0);
8512 }
8513
8514 LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
8515     armnn::IWorkloadFactory& workloadFactory,
8516     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8517 {
8518     const unsigned int shape0[] = { 1, 2, 2, 3 };
8519     const unsigned int shape1[] = { 1, 1, 1, 3 };
8520
8521     std::vector<uint8_t> input0({
8522         1, 2, 3,    4,  5,  6,
8523         7, 8, 9,   10, 11, 12
8524     });
8525
8526     std::vector<uint8_t> input1({1, 2, 3});
8527
8528     std::vector<uint8_t> output({
8529         1,  4,   9,     4, 10, 18,
8530         7, 16,  27,    10, 22, 36
8531     });
8532
8533     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8534                                                                               memoryManager,
8535                                                                               shape0,
8536                                                                               input0,
8537                                                                               1.0f,
8538                                                                               0,
8539                                                                               shape1,
8540                                                                               input1,
8541                                                                               1.0f,
8542                                                                               0,
8543                                                                               shape0,
8544                                                                               output,
8545                                                                               1.0f,
8546                                                                               0);
8547 }
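
// The 1D-vector broadcast above tiles input1 along every dimension of size 1.
// For the NCHW shapes { 1, 2, 2, 3 } and { 1, 1, 1, 3 } only the innermost
// (width) index selects an element of input1, so with unit scale and zero offset
// each expected entry is input0[i] * input1[i % 3]. A minimal sketch of that
// index mapping (editor's illustration, not used by the tests):
namespace
{
inline uint8_t BroadcastMulOverWidth(const std::vector<uint8_t>& input0,
                                     const std::vector<uint8_t>& input1, // shape { 1, 1, 1, width }
                                     unsigned int width,
                                     unsigned int flatIndex)
{
    // A size-1 dimension repeats its single value, so only the width index
    // varies when reading from input1.
    return static_cast<uint8_t>(input0[flatIndex] * input1[flatIndex % width]);
}
} // anonymous namespace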
8548
8549 LayerTestResult<int16_t, 4> MultiplicationInt16Test(
8550     armnn::IWorkloadFactory& workloadFactory,
8551     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8552 {
8553     const unsigned int shape[] = { 1, 2, 2, 3 };
8554
8555     std::vector<int16_t> input0(
8556     {
8557         6,   7,  8,  9, 10, 11,
8558         12, 13, 14, 15, 16, 17
8559     });
8560
8561     std::vector<int16_t> input1(
8562     {
8563         1, 2, 3,  4,  5,  6,
8564         7, 8, 9, 10, 11, 12
8565     });
8566
8567     std::vector<int16_t> output(
8568     {
8569         6,   14,  24,  36,  50,  66,
8570         84, 104, 126, 150, 176, 204
8571     });
8572
8573     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8574                                                                               memoryManager,
8575                                                                               shape,
8576                                                                               input0,
8577                                                                               1.0f,
8578                                                                               0,
8579                                                                               shape,
8580                                                                               input1,
8581                                                                               1.0f,
8582                                                                               0,
8583                                                                               shape,
8584                                                                               output,
8585                                                                               1.0f,
8586                                                                               0);
8587 }
8588
8589 LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
8590     armnn::IWorkloadFactory& workloadFactory,
8591     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8592 {
8593     const unsigned int shape0[] = { 1, 2, 2, 3 };
8594     const unsigned int shape1[] = { 1, 1, 1, 1 };
8595
8596     std::vector<int16_t> input0(
8597     {
8598         1, 2, 3,  4,  5,  6,
8599         7, 8, 9, 10, 11, 12
8600     });
8601
8602     std::vector<int16_t> input1({2});
8603
8604     std::vector<int16_t> output(
8605     {
8606         2,   4,  6,  8, 10, 12,
8607         14, 16, 18, 20, 22, 24
8608     });
8609
8610     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8611                                                                               memoryManager,
8612                                                                               shape0,
8613                                                                               input0,
8614                                                                               1.0f,
8615                                                                               0,
8616                                                                               shape1,
8617                                                                               input1,
8618                                                                               1.0f,
8619                                                                               0,
8620                                                                               shape0,
8621                                                                               output,
8622                                                                               1.0f,
8623                                                                               0);
8624 }
8625
8626 LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
8627     armnn::IWorkloadFactory& workloadFactory,
8628     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8629 {
8630     const unsigned int shape0[] = { 1, 2, 2, 3 };
8631     const unsigned int shape1[] = { 1, 1, 1, 3 };
8632
8633     std::vector<int16_t> input0(
8634     {
8635         1, 2, 3,  4,  5,  6,
8636         7, 8, 9, 10, 11, 12
8637     });
8638
8639     std::vector<int16_t> input1({1, 2, 3});
8640
8641     std::vector<int16_t> output(
8642     {
8643         1,  4,  9,  4, 10, 18,
8644         7, 16, 27, 10, 22, 36
8645     });
8646
8647     return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8648                                                                               memoryManager,
8649                                                                               shape0,
8650                                                                               input0,
8651                                                                               1.0f,
8652                                                                               0,
8653                                                                               shape1,
8654                                                                               input1,
8655                                                                               1.0f,
8656                                                                               0,
8657                                                                               shape0,
8658                                                                               output,
8659                                                                               1.0f,
8660                                                                               0);
8661 }
8662
8663 namespace
8664 {
8665 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
8666 LayerTestResult<T, 4> SubtractionTestHelper(
8667     armnn::IWorkloadFactory& workloadFactory,
8668     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8669     const unsigned int shape0[4],
8670     const std::vector<T>& values0,
8671     float scale0,
8672     int32_t offset0,
8673     const unsigned int shape1[4],
8674     const std::vector<T>& values1,
8675     float scale1,
8676     int32_t offset1,
8677     const unsigned int outShape[4],
8678     const std::vector<T>& outValues,
8679     float outScale,
8680     int32_t outOffset)
8681 {
8682     armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
8683     armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
8684     armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
8685
8686     inputTensorInfo0.SetQuantizationScale(scale0);
8687     inputTensorInfo0.SetQuantizationOffset(offset0);
8688
8689     inputTensorInfo1.SetQuantizationScale(scale1);
8690     inputTensorInfo1.SetQuantizationOffset(offset1);
8691
8692     outputTensorInfo.SetQuantizationScale(outScale);
8693     outputTensorInfo.SetQuantizationOffset(outOffset);
8694
8695     auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8696     auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8697
8698     LayerTestResult<T, 4> result(outputTensorInfo);
8699     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8700
8701     std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8702     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8703     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8704
8705     armnn::SubtractionQueueDescriptor data;
8706     armnn::WorkloadInfo info;
8707     AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
8708     AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
8709     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8710
8711     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
8712
8713     inputHandle0->Allocate();
8714     inputHandle1->Allocate();
8715     outputHandle->Allocate();
8716
8717     CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8718     CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8719
8720     workload->PostAllocationConfigure();
8721     workload->Execute();
8722
8723     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8724
8725     return result;
8726 }
8727 } // anonymous namespace
8728
8729 LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8730     armnn::IWorkloadFactory& workloadFactory,
8731     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8732 {
8733     const unsigned int shape0[] = { 1, 1, 2, 2 };
8734     const unsigned int shape1[] = { 1, 1, 2, 2 };
8735
8736     std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8737     std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8738     std::vector<uint8_t> output({ 3, 3, 5, 5 });
8739
8740     return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8741                                                                    memoryManager,
8742                                                                    shape0, input0, 0.5f, 2,
8743                                                                    shape1, input1, 1.0f, 0,
8744                                                                    shape0, output, 1.0f, 0);
8745 }
8746
8747 LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8748     armnn::IWorkloadFactory& workloadFactory,
8749     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8750 {
8751     const unsigned int shape0[] = { 1, 1, 2, 2 };
8752     const unsigned int shape1[] = { 1, 1, 1, 1 };
8753
8754     std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8755     std::vector<uint8_t> input1({ 2 });
8756     std::vector<uint8_t> output({ 5, 6, 7, 8 });
8757
8758     return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8759                                                                    memoryManager,
8760                                                                    shape0, input0, 0.5f, 2,
8761                                                                    shape1, input1, 1.0f, 0,
8762                                                                    shape0, output, 1.0f, 3);
8763 }
8764
8765 LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
8766     armnn::IWorkloadFactory& workloadFactory,
8767     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8768 {
8769     const unsigned int shape0[] = { 1, 1, 2, 2 };
8770     const unsigned int shape1[] = { 1, 1, 1, 2 };
8771
8772     std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8773     std::vector<uint8_t> input1({ 2, 1 });
8774     std::vector<uint8_t> output({ 8, 11, 12, 15 });
8775
8776     return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8777                                                                    memoryManager,
8778                                                                    shape0, input0, 1.0f, 0,
8779                                                                    shape1, input1, 1.0f, 0,
8780                                                                    shape0, output, 1.0f, 0);
8781 }
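
// With input1 of shape { 1, 1, 1, 2 }, the vector { 2, 1 } is broadcast along the
// height dimension, so the expected output above works out as
// { 10 - 2, 12 - 1, 14 - 2, 16 - 1 } == { 8, 11, 12, 15 }.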
8782
8783 LayerTestResult<float, 4> SubtractionTest(
8784     armnn::IWorkloadFactory& workloadFactory,
8785     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8786 {
8787     const unsigned int shape0[] = { 1, 1, 2, 2 };
8788     const unsigned int shape1[] = { 1, 1, 2, 2 };
8789
8790     std::vector<float> input0({ 1,  2, 3, 4 });
8791     std::vector<float> input1({ 1, -1, 0, 2 });
8792     std::vector<float> output({ 0,  3, 3, 2 });
8793
8794     return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8795                                                            memoryManager,
8796                                                            shape0, input0, 1.0f, 0,
8797                                                            shape1, input1, 1.0f, 0,
8798                                                            shape0, output, 1.0f, 0);
8799 }
8800
8801 LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8802     armnn::IWorkloadFactory& workloadFactory,
8803     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8804 {
8805     const unsigned int shape0[] = { 1, 1, 2, 2 };
8806     const unsigned int shape1[] = { 1, 1, 1, 1 };
8807
8808     std::vector<float> input0({ 1,  2, 3, 4 });
8809     std::vector<float> input1({ 10 });
8810     std::vector<float> output({ -9,  -8, -7, -6 });
8811
8812     return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8813                                                            memoryManager,
8814                                                            shape0, input0, 1.0f, 0,
8815                                                            shape1, input1, 1.0f, 0,
8816                                                            shape0, output, 1.0f, 0);
8817 }
8818
8819 LayerTestResult<float, 4> SubtractionBroadcastTest(
8820     armnn::IWorkloadFactory& workloadFactory,
8821     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8822 {
8823     const unsigned int shape0[] = { 1, 1, 2, 2 };
8824     const unsigned int shape1[] = { 1, 1, 1, 2 };
8825
8826     std::vector<float> input0({ 1,  2, 3, 4 });
8827     std::vector<float> input1({ 10, -5 });
8828     std::vector<float> output({ -9,  7, -7, 9 });
8829
8830     return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8831                                                            memoryManager,
8832                                                            shape0, input0, 1.0f, 0,
8833                                                            shape1, input1, 1.0f, 0,
8834                                                            shape0, output, 1.0f, 0);
8835 }
8836
8837 LayerTestResult<int16_t, 4> SubtractionInt16Test(
8838     armnn::IWorkloadFactory& workloadFactory,
8839     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8840 {
8841     const unsigned int shape0[] = { 1, 1, 2, 2 };
8842     const unsigned int shape1[] = { 1, 1, 2, 2 };
8843
8844     std::vector<int16_t> input0({ 10, 12, 14, 16 });
8845     std::vector<int16_t> input1({ 1, 2, 1, 2 });
8846     std::vector<int16_t> output({ 4, 4, 6, 6 }); // (10, 12, 14, 16) * 0.5 - (1, 2, 1, 2)
8847
8848     return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8849                                                                    memoryManager,
8850                                                                    shape0, input0, 0.5f, 0,
8851                                                                    shape1, input1, 1.0f, 0,
8852                                                                    shape0, output, 1.0f, 0);
8853 }
8854
8855 LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8856     armnn::IWorkloadFactory& workloadFactory,
8857     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8858 {
8859     const unsigned int shape0[] = { 1, 1, 2, 2 };
8860     const unsigned int shape1[] = { 1, 1, 1, 1 };
8861
8862     std::vector<int16_t> input0({ 10, 12, 14, 16 });
8863     std::vector<int16_t> input1({ 2 });
8864     std::vector<int16_t> output({ 3, 4, 5, 6 });
8865
8866     return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8867                                                                    memoryManager,
8868                                                                    shape0, input0, 0.5f, 0,
8869                                                                    shape1, input1, 1.0f, 0,
8870                                                                    shape0, output, 1.0f, 0);
8871 }
8872
8873 LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8874     armnn::IWorkloadFactory& workloadFactory,
8875     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8876 {
8877     const unsigned int shape0[] = { 1, 1, 2, 2 };
8878     const unsigned int shape1[] = { 1, 1, 1, 2 };
8879
8880     std::vector<int16_t> input0({ 10, 12, 14, 16 });
8881     std::vector<int16_t> input1({ 2, 1 });
8882     std::vector<int16_t> output({ 8, 11, 12, 15 });
8883
8884     return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8885                                                                    memoryManager,
8886                                                                    shape0, input0, 1.0f, 0,
8887                                                                    shape1, input1, 1.0f, 0,
8888                                                                    shape0, output, 1.0f, 0);
8889 }
8890
8891 LayerTestResult<float, 4> BatchNormTest(
8892     armnn::IWorkloadFactory& workloadFactory,
8893     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8894 {
8895     // BatchSize: 1
8896     // Channels: 2
8897     // Height: 3
8898     // Width: 2
8899
8900     const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8901     std::vector<float> inputValues
8902     {
8903         // Batch 0, Channel 0, Height (3) x Width (2)
8904          1.f, 4.f,
8905          4.f, 2.f,
8906          1.f, 6.f,
8907
8908         // Batch 0, Channel 1, Height (3) x Width (2)
8909          1.f, 1.f,
8910          4.f, 1.f,
8911         -2.f, 4.f
8912     };
8913     std::vector<float> expectedOutputValues
8914     {
8915         // Batch 0, Channel 0, Height (3) x Width (2)
8916         1.f, 4.f,
8917         4.f, 2.f,
8918         1.f, 6.f,
8919
8920         // Batch 0, Channel 1, Height (3) x Width (2)
8921         3.f, 3.f,
8922         4.f, 3.f,
8923         2.f, 4.f
8924     };
8925
8926     return BatchNormTestImpl<armnn::DataType::Float32>(
8927         workloadFactory, memoryManager,
8928         inputOutputShape, inputValues, expectedOutputValues,
8929         0.f, 0, armnn::DataLayout::NCHW);
8930 }
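
// BatchNormTestImpl applies y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
// independently per channel. The expected values above are consistent with the
// per-channel constants mean = { 3, -2 }, variance = { 4, 9 }, beta = { 3, 2 } and
// gamma = { 2, 1 } (inferred from the data here; BatchNormTestImpl.hpp holds the
// authoritative values): channel 0 reduces to the identity, 2 * (x - 3) / 2 + 3 == x,
// while channel 1 maps x to (x + 2) / 3 + 2, e.g. -2 -> 2 and 4 -> 4.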
8931
8932 LayerTestResult<float, 4> BatchNormNhwcTest(
8933     armnn::IWorkloadFactory& workloadFactory,
8934     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8935 {
8936     // BatchSize: 1
8937     // Height: 3
8938     // Width: 2
8939     // Channels: 2
8940
8941     const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8942     std::vector<float> inputValues
8943     {
8944         // Batch 0, Height 0, Width (2) x Channel (2)
8945         1.f,  1.f,
8946         4.f,  1.f,
8947
8948         // Batch 0, Height 1, Width (2) x Channel (2)
8949         4.f,  4.f,
8950         2.f,  1.f,
8951
8952         // Batch 0, Height 2, Width (2) x Channel (2)
8953         1.f, -2.f,
8954         6.f,  4.f
8955     };
8956     std::vector<float> expectedOutputValues
8957     {
8958         // Batch 0, Height 0, Width (2) x Channel (2)
8959         1.f, 3.f,
8960         4.f, 3.f,
8961
8962         // Batch 0, Height 1, Width (2) x Channel (2)
8963         4.f, 4.f,
8964         2.f, 3.f,
8965
8966         // Batch 0, Height 2, Width (2) x Channel (2)
8967         1.f, 2.f,
8968         6.f, 4.f
8969     };
8970
8971     return BatchNormTestImpl<armnn::DataType::Float32>(
8972         workloadFactory, memoryManager,
8973         inputOutputShape, inputValues, expectedOutputValues,
8974         0.f, 0, armnn::DataLayout::NHWC);
8975 }
8976
8977 LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8978     armnn::IWorkloadFactory& workloadFactory,
8979     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8980 {
8981     // BatchSize: 1
8982     // Channels: 2
8983     // Height: 3
8984     // Width: 2
8985
8986     const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8987     std::vector<float> inputValues
8988     {
8989         // Batch 0, Channel 0, Height (3) x Width (2)
8990          1.f, 4.f,
8991          4.f, 2.f,
8992          1.f, 6.f,
8993
8994         // Batch 0, Channel 1, Height (3) x Width (2)
8995          1.f, 1.f,
8996          4.f, 1.f,
8997         -2.f, 4.f
8998     };
8999     std::vector<float> expectedOutputValues
9000     {
9001         // Batch 0, Channel 0, Height (3) x Width (2)
9002         1.f, 4.f,
9003         4.f, 2.f,
9004         1.f, 6.f,
9005
9006         // Batch 0, Channel 1, Height (3) x Width (2)
9007         3.f, 3.f,
9008         4.f, 3.f,
9009         2.f, 4.f
9010     };
9011
9012     return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
9013         workloadFactory, memoryManager,
9014         inputOutputShape, inputValues, expectedOutputValues,
9015         1.f/20.f, 50, armnn::DataLayout::NCHW);
9016 }
9017
9018 LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
9019     armnn::IWorkloadFactory& workloadFactory,
9020     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9021 {
9022     // BatchSize: 1
9023     // Height: 3
9024     // Width: 2
9025     // Channels: 2
9026
9027     const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
9028     std::vector<float> inputValues
9029     {
9030         // Batch 0, Height 0, Width (2) x Channel (2)
9031         1.f,  1.f,
9032         4.f,  1.f,
9033
9034         // Batch 0, Height 1, Width (2) x Channel (2)
9035         4.f,  4.f,
9036         2.f,  1.f,
9037
9038         // Batch 0, Height 2, Width (2) x Channel (2)
9039         1.f, -2.f,
9040         6.f,  4.f
9041     };
9042     std::vector<float> expectedOutputValues
9043     {
9044         // Batch 0, Height 0, Width (2) x Channel (2)
9045         1.f, 3.f,
9046         4.f, 3.f,
9047
9048         // Batch 0, Height 1, Width (2) x Channel (2)
9049         4.f, 4.f,
9050         2.f, 3.f,
9051
9052         // Batch 0, Height 2, Width (2) x Channel (2)
9053         1.f, 2.f,
9054         6.f, 4.f
9055     };
9056
9057     return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
9058         workloadFactory, memoryManager,
9059         inputOutputShape, inputValues, expectedOutputValues,
9060         1.f/20.f, 50, armnn::DataLayout::NHWC);
9061 }
9062
9063 LayerTestResult<int16_t, 4> BatchNormInt16Test(
9064     armnn::IWorkloadFactory& workloadFactory,
9065     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9066 {
9067     // BatchSize: 1
9068     // Channels: 2
9069     // Height: 3
9070     // Width: 2
9071
9072     const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
9073     std::vector<float> inputValues
9074     {
9075         // Batch 0, Channel 0, Height (3) x Width (2)
9076          1.f, 4.f,
9077          4.f, 2.f,
9078          1.f, 6.f,
9079
9080         // Batch 0, Channel 1, Height (3) x Width (2)
9081          1.f, 1.f,
9082          4.f, 1.f,
9083         -2.f, 4.f
9084     };
9085     std::vector<float> expectedOutputValues
9086     {
9087         // Batch 0, Channel 0, Height (3) x Width (2)
9088         1.f, 4.f,
9089         4.f, 2.f,
9090         1.f, 6.f,
9091
9092         // Batch 0, Channel 1, Height (3) x Width (2)
9093         3.f, 3.f,
9094         4.f, 3.f,
9095         2.f, 4.f
9096     };
9097
9098     return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
9099         workloadFactory, memoryManager,
9100         inputOutputShape, inputValues, expectedOutputValues,
9101         1.f/20.f, 50, armnn::DataLayout::NCHW);
9102 }
9103
9104 LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
9105     armnn::IWorkloadFactory& workloadFactory,
9106     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9107 {
9108     // BatchSize: 1
9109     // Height: 3
9110     // Width: 2
9111     // Channels: 2
9112
9113     const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
9114     std::vector<float> inputValues
9115     {
9116         // Batch 0, Height 0, Width (2) x Channel (2)
9117         1.f,  1.f,
9118         4.f,  1.f,
9119
9120         // Batch 0, Height 1, Width (2) x Channel (2)
9121         4.f,  4.f,
9122         2.f,  1.f,
9123
9124         // Batch 0, Height 2, Width (2) x Channel (2)
9125         1.f, -2.f,
9126         6.f,  4.f
9127     };
9128     std::vector<float> expectedOutputValues
9129     {
9130         // Batch 0, Height 0, Width (2) x Channel (2)
9131         1.f, 3.f,
9132         4.f, 3.f,
9133
9134         // Batch 0, Height 1, Width (2) x Channel (2)
9135         4.f, 4.f,
9136         2.f, 3.f,
9137
9138         // Batch 0, Height 2, Width (2) x Channel (2)
9139         1.f, 2.f,
9140         6.f, 4.f
9141     };
9142
9143     return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
9144         workloadFactory, memoryManager,
9145         inputOutputShape, inputValues, expectedOutputValues,
9146         1.f/20.f, 50, armnn::DataLayout::NHWC);
9147 }
9148
9149 LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
9150     armnn::IWorkloadFactory& workloadFactory,
9151     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9152 {
9153     return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
9154 }
9155
9156 LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
9157     armnn::IWorkloadFactory& workloadFactory,
9158     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9159 {
9160     return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
9161 }
9162
9163 LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
9164     armnn::IWorkloadFactory& workloadFactory,
9165     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9166 {
9167     return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9168 }
9169
9170 LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
9171     armnn::IWorkloadFactory& workloadFactory,
9172     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9173 {
9174     return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9175 }
9176
9177 LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
9178     armnn::IWorkloadFactory& workloadFactory,
9179     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9180 {
9181     return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9182 }
9183
9184 LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
9185     armnn::IWorkloadFactory& workloadFactory,
9186     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9187 {
9188     return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9189         workloadFactory, memoryManager, 0.5f, -1);
9190 }
9191
9192 LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
9193     armnn::IWorkloadFactory& workloadFactory,
9194     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9195 {
9196     return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9197         workloadFactory, memoryManager, 0.5f, -1);
9198 }
9199
9200 LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
9201     armnn::IWorkloadFactory& workloadFactory,
9202     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9203 {
9204     return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9205 }
9206
9207 LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
9208     armnn::IWorkloadFactory& workloadFactory,
9209     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9210 {
9211     return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9212 }
9213
9214 LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
9215     armnn::IWorkloadFactory& workloadFactory,
9216     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9217     bool useSubtensor)
9218 {
9219     return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9220         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
9221 }
9222
9223 LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
9224     armnn::IWorkloadFactory& workloadFactory,
9225     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9226 {
9227     return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
9228 }
9229
9230 LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
9231     armnn::IWorkloadFactory& workloadFactory,
9232     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9233 {
9234     return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9235         workloadFactory, memoryManager, 0.5f, -1);
9236 }
9237
9238 LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
9239     armnn::IWorkloadFactory& workloadFactory,
9240     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9241     bool useSubtensor)
9242 {
9243     return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9244         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
9245 }
9246
9247 LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
9248     armnn::IWorkloadFactory& workloadFactory,
9249     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9250 {
9251     return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9252 }
9253
9254 LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
9255     armnn::IWorkloadFactory& workloadFactory,
9256     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9257 {
9258     return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9259 }
9260
9261 LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
9262     armnn::IWorkloadFactory& workloadFactory,
9263     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9264 {
9265     return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
9266 }
9267
9268 LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
9269     armnn::IWorkloadFactory& workloadFactory,
9270     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
9271 {
9272     return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9273         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
9274 }
9275
9276 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
9277     armnn::IWorkloadFactory& workloadFactory,
9278     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9279 {
9280     return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
9281         workloadFactory, memoryManager, 0.5f, -1);
9282 }
9283
9284 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
9285     armnn::IWorkloadFactory& workloadFactory,
9286     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9287 {
9288     return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
9289         workloadFactory, memoryManager, 0.5f, -1);
9290 }
9291
9292 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
9293     armnn::IWorkloadFactory& workloadFactory,
9294     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9295 {
9296     return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9297         workloadFactory, memoryManager, 0.5f, -1);
9298 }
9299
9300 LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
9301     armnn::IWorkloadFactory& workloadFactory,
9302     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9303     bool useSubtensor)
9304 {
9305     return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9306         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
9307 }
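
// Every Uint8 concatenation test above shares a single quantization space
// (scale 0.5, offset -1) between all inputs and the output, so concatenation
// reduces to copying each input's raw bytes into the right slice of the output;
// no re-quantization is involved. For the outermost-dimension case the slices
// are contiguous, roughly as in this sketch (editor's illustration):
namespace
{
inline void ConcatAlongDim0(const std::vector<uint8_t>& input0,
                            const std::vector<uint8_t>& input1,
                            std::vector<uint8_t>& output)
{
    // With dim 0 as the concatenation axis the inputs occupy contiguous,
    // disjoint ranges of the output buffer.
    std::copy(input0.begin(), input0.end(), output.begin());
    std::copy(input1.begin(), input1.end(),
              output.begin() + static_cast<std::vector<uint8_t>::difference_type>(input0.size()));
}
} // anonymous namespace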
9308
9309 LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
9310     armnn::IWorkloadFactory& workloadFactory,
9311     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9312     bool forceNoPadding)
9313 {
9314     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
9315         workloadFactory, memoryManager, forceNoPadding);
9316 }
9317
9318 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
9319     armnn::IWorkloadFactory& workloadFactory,
9320     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9321     bool forceNoPadding)
9322 {
9323     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
9324         workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
9325 }
9326
9327 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
9328     armnn::IWorkloadFactory& workloadFactory,
9329     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9330     bool forceNoPadding)
9331 {
9332     return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
9333             workloadFactory, memoryManager, forceNoPadding);
9334 }
9335
9336 LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
9337     armnn::IWorkloadFactory& workloadFactory,
9338     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9339     bool forceNoPadding)
9340 {
9341     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
9342         workloadFactory, memoryManager, forceNoPadding);
9343 }
9344
9345 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
9346     armnn::IWorkloadFactory& workloadFactory,
9347     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9348     bool forceNoPadding)
9349 {
9350     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
9351         workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
9352 }
9353
9354 LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
9355     armnn::IWorkloadFactory& workloadFactory,
9356     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9357     bool forceNoPadding)
9358 {
9359     return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
9360             workloadFactory, memoryManager, forceNoPadding);
9361 }
9362
9363 LayerTestResult<float, 4> SimpleMaxPooling2dTest(
9364     armnn::IWorkloadFactory& workloadFactory,
9365     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9366     const armnn::DataLayout dataLayout)
9367 {
9368     return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
9369 }
9370
9371 LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
9372     armnn::IWorkloadFactory& workloadFactory,
9373     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9374     const armnn::DataLayout dataLayout)
9375 {
9376     return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
9377 }
9378
9379 LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
9380     armnn::IWorkloadFactory& workloadFactory,
9381     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9382     const armnn::DataLayout dataLayout)
9383 {
9384     return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9385 }

9386 LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
9387     armnn::IWorkloadFactory& workloadFactory,
9388     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9389 {
9390     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9391 }
9392
9393 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
9394     armnn::IWorkloadFactory& workloadFactory,
9395     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9396 {
9397     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9398             workloadFactory, memoryManager, 1.0f, -5);
9399 }
9400
9401 LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
9402     armnn::IWorkloadFactory& workloadFactory,
9403     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9404 {
9405     return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9406             workloadFactory, memoryManager);
9407 }
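
// The "IgnorePadding" pooling tests exercise armnn::PaddingMethod::IgnoreValue.
// Under both supported padding methods the padded values themselves are ignored;
// the difference is that IgnoreValue counts the padded positions in the divisor
// of average and L2 pooling, while Exclude does not (max pooling behaves the same
// either way). A descriptor for one of these tests is configured roughly as
// follows (sketch; Pooling2dTestImpl.hpp holds the values actually used):
//
//     armnn::Pooling2dDescriptor descriptor;
//     descriptor.m_PoolType      = armnn::PoolingAlgorithm::Max;
//     descriptor.m_PoolWidth     = descriptor.m_PoolHeight = 2;
//     descriptor.m_StrideX       = descriptor.m_StrideY    = 2;
//     descriptor.m_PadLeft       = descriptor.m_PadRight   = 1;
//     descriptor.m_PadTop        = descriptor.m_PadBottom  = 1;
//     descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;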
9408
9409 LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
9410     armnn::IWorkloadFactory& workloadFactory,
9411     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9412 {
9413     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9414 }
9415
9416 LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
9417     armnn::IWorkloadFactory& workloadFactory,
9418     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9419 {
9420     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9421             workloadFactory, memoryManager, 1.0f, -5);
9422 }
9423
9424 LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
9425     armnn::IWorkloadFactory& workloadFactory,
9426     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9427 {
9428     return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9429             workloadFactory, memoryManager);
9430 }
9431
9432 LayerTestResult<float, 4> SimpleAveragePooling2dTest(
9433     armnn::IWorkloadFactory& workloadFactory,
9434     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9435     const armnn::DataLayout dataLayout)
9436 {
9437     return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
9438 }
9439
9440 LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
9441     armnn::IWorkloadFactory& workloadFactory,
9442     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9443     const armnn::DataLayout dataLayout)
9444 {
9445     return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9446         workloadFactory, memoryManager, dataLayout, 0.5, -1);
9447 }
9448
9449 LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
9450     armnn::IWorkloadFactory& workloadFactory,
9451     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9452     const armnn::DataLayout dataLayout)
9453 {
9454     return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9455             workloadFactory, memoryManager, dataLayout);
9456 }
9457
9458 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
9459     armnn::IWorkloadFactory& workloadFactory,
9460     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9461     bool forceNoPadding)
9462 {
9463     return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
9464         workloadFactory, memoryManager, forceNoPadding);
9465 }
9466
9467 LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
9468     armnn::IWorkloadFactory& workloadFactory,
9469     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9470 {
9471     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9472 }
9473
9474 LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
9475     armnn::IWorkloadFactory& workloadFactory,
9476     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9477 {
9478     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9479         workloadFactory, memoryManager, 0.5, -1);
9480 }
9481
9482 LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
9483     armnn::IWorkloadFactory& workloadFactory,
9484     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9485 {
9486     return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9487             workloadFactory, memoryManager);
9488 }

9489 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
9490     armnn::IWorkloadFactory& workloadFactory,
9491     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9492 {
9493     return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9494 }
9495
9496 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
9497     armnn::IWorkloadFactory& workloadFactory,
9498     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9499 {
9500     return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9501             workloadFactory, memoryManager);
9502 }
9503
9504 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
9505     armnn::IWorkloadFactory& workloadFactory,
9506     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9507 {
9508     return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9509             workloadFactory, memoryManager);
9510 }
9511
9512 LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
9513     armnn::IWorkloadFactory& workloadFactory,
9514     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9515 {
9516     return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
9517             workloadFactory, memoryManager);
9518 }
9519
9520 LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
9521     armnn::IWorkloadFactory& workloadFactory,
9522     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9523 {
9524     return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
9525             workloadFactory, memoryManager);
9526 }
9527
9528 LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
9529     armnn::IWorkloadFactory& workloadFactory,
9530     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9531 {
9532     return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
9533             workloadFactory, memoryManager);
9534 }
9535
9536 LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
9537     armnn::IWorkloadFactory& workloadFactory,
9538     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9539 {
9540     return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9541 }
9542
9543 LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
9544     armnn::IWorkloadFactory& workloadFactory,
9545     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9546 {
9547     return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9548             workloadFactory, memoryManager);
9549 }
9550
9551 LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
9552     armnn::IWorkloadFactory& workloadFactory,
9553     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9554 {
9555     return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9556             workloadFactory, memoryManager);
9557 }
9558
9559 LayerTestResult<float, 4> SimpleL2Pooling2dTest(
9560     armnn::IWorkloadFactory& workloadFactory,
9561     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9562     const armnn::DataLayout dataLayout)
9563 {
9564     return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
9565 }
9566
9567 LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
9568     armnn::IWorkloadFactory& workloadFactory,
9569     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9570     const armnn::DataLayout dataLayout)
9571 {
9572     return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
9573 }
9574
9575 LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
9576     armnn::IWorkloadFactory& workloadFactory,
9577     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9578     const armnn::DataLayout dataLayout)
9579 {
9580     return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9581 }
9582
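// The L2 pooling tests compute the square root of the mean of the squares over
// each pooling window: out = sqrt(sum(x_i * x_i) / N). For example, a 2x2 window
// holding the (hypothetical) values { 1, 3, 5, 5 } yields
// sqrt((1 + 9 + 25 + 25) / 4) == sqrt(15) ~= 3.873.
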
9583 LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
9584     armnn::IWorkloadFactory& workloadFactory,
9585     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9586 {
9587     return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9588 }
9589
9590 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
9591     armnn::IWorkloadFactory& workloadFactory,
9592     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9593 {
9594     return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9595 }
9596
9597 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
9598     armnn::IWorkloadFactory& workloadFactory,
9599     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9600 {
9601     return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9602 }
9603
9604 LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
9605     armnn::IWorkloadFactory& workloadFactory,
9606     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9607 {
9608     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9609 }
9610
9611 LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
9612     armnn::IWorkloadFactory& workloadFactory,
9613     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9614 {
9615     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9616 }
9617
9618 LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
9619     armnn::IWorkloadFactory& workloadFactory,
9620     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9621 {
9622     return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9623 }
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

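// The ComparePooling2d tests run the same pooling workload through both the
// factory under test and a reference factory, then check that the two results
// agree; this validates a backend against the reference implementation without
// hard-coded expected data.
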
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}

LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}

LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create the initial 3x3 input tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool with poolSize = 1x1 and stride = 2x2:
    // Result =
    // 1, 3
    // 7, 9
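    // (With a 1x1 pool, max pooling degenerates to subsampling: the 2x2 stride
    // simply picks the input elements at (0,0), (0,2), (2,0) and (2,2).)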
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Buffer to hold the MaxPool result before it is fed into the addition.
    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    // Create an addition with a second tensor of the same size.
    // These values are what a Conv2d over the initial tensor with a 2x2 kernel
    // of ones and stride 1x1 would produce:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
                                                                    {12, 16,
                                                                     24, 28
                                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                    13, 19,
                    31, 37
            }));
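    // Sanity check on the expected values: MaxPool output {1, 3, 7, 9} plus
    // addInput {12, 16, 24, 28} gives {13, 19, 31, 37} element-wise.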

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the second tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    // Run the MaxPool first, then read back its result; reading the output
    // handle before Execute() would only copy uninitialised data.
    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    // Feed the MaxPool result and the second tensor into the addition.
    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}

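// SpaceToBatchNd moves blocks of spatial data into the batch dimension: with a
// block size of B, an NCHW input of shape [N, C, H, W] becomes
// [N*B*B, C, H/B, W/B] (any requested padding is applied to H and W first).
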
LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

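// SpaceToDepth rearranges blocks of spatial data into the channel dimension:
// with a block size of B, an NHWC input of shape [N, H, W, C] becomes
// [N, H/B, W/B, C*B*B].
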
LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

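// StridedSlice extracts a sub-tensor using per-dimension begin/end/stride
// values, analogous to NumPy's input[begin:end:stride] indexing. The
// ShrinkAxisMask variants additionally drop the masked dimensions, which is
// why their results have a lower rank than their 4D inputs.
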
LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

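// The Debug workload acts as an identity operation that also reports the
// tensor data flowing through it; the tests below check that the data itself
// is forwarded unchanged for each supported rank and data type.
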
LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

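// Gather selects entries from the params tensor using an indices tensor; for
// example, params = {1, 2, 3, 4} with indices = {0, 2} yields {1, 3}. The
// MultiDim variants exercise the same lookup with higher-rank params and
// indices tensors.
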
LayerTestResult<float, 1> Gather1DParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}

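// Dequantize maps a quantized value q to scale * (q - offset): with
// scale = 0.5 and offset = 1, q = 3 dequantizes to 1.0f. The Offset variant
// checks that a non-zero quantization offset is honoured.
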
LayerTestResult<float, 4> DequantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> DequantizeOffsetUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> DequantizeSimpleInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

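// Quantize is the inverse mapping, q = round(value / scale) + offset; the
// Clamp variants check that out-of-range results saturate to the limits of
// the target type (0..255 for QuantisedAsymm8, -32768..32767 for QuantisedSymm16).
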
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}