IVGCVSW-1865 - Support NHWC for Convolution2D (CpuRef)
[platform/upstream/armnn.git] / src / backends / reference / test / RefCreateWorkloadTests.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include <armnn/test/CreateWorkload.hpp>
7
8 #include <backends/CpuTensorHandle.hpp>
9 #include <backends/reference/RefWorkloadFactory.hpp>
10 #include <backends/reference/workloads/RefWorkloads.hpp>
11
12 namespace
13 {
14
15 template<typename Workload>
16 void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
17 {
18     auto queueDescriptor = workload->GetData();
19     auto inputHandle  = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
20     auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
21     BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
22     BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
23 }
24
25 template <typename Workload>
26 void CheckInputsOutput(std::unique_ptr<Workload> workload,
27                        const TensorInfo&         inputInfo0,
28                        const TensorInfo&         inputInfo1,
29                        const TensorInfo&         outputInfo)
30 {
31     auto queueDescriptor = workload->GetData();
32     auto inputHandle0     = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
33     auto inputHandle1     = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]);
34     auto outputHandle    = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
35     BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
36     BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
37     BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
38 }
39 }
40
41 BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
42
43 template <typename ActivationWorkloadType, armnn::DataType DataType>
44 static void RefCreateActivationWorkloadTest()
45 {
46     Graph graph;
47     RefWorkloadFactory factory;
48     auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
49
50     // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
51     CheckInputOutput(std::move(workload),
52         TensorInfo({ 1, 1 }, DataType),
53         TensorInfo({ 1, 1 }, DataType));
54 }
55
// Instantiations of the activation workload test for each supported data type.
BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationUint8Workload, armnn::DataType::QuantisedAsymm8>();
}
65
66 template <typename WorkloadType,
67           typename DescriptorType,
68           typename LayerType,
69           armnn::DataType DataType>
70 static void RefCreateArithmethicWorkloadTest()
71 {
72     Graph graph;
73     RefWorkloadFactory factory;
74     auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
75
76     CheckInputsOutput(std::move(workload),
77         TensorInfo({ 2, 3 }, DataType),
78         TensorInfo({ 2, 3 }, DataType),
79         TensorInfo({ 2, 3 }, DataType));
80 }
81
// Instantiations of the arithmetic workload test for each elementwise
// operation (addition, subtraction, multiplication, division) and data type.
BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    RefCreateArithmethicWorkloadTest<RefAdditionFloat32Workload,
                                     AdditionQueueDescriptor,
                                     AdditionLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
{
    RefCreateArithmethicWorkloadTest<RefAdditionUint8Workload,
                                     AdditionQueueDescriptor,
                                     AdditionLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
    RefCreateArithmethicWorkloadTest<RefSubtractionFloat32Workload,
                                     SubtractionQueueDescriptor,
                                     SubtractionLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
{
    RefCreateArithmethicWorkloadTest<RefSubtractionUint8Workload,
                                     SubtractionQueueDescriptor,
                                     SubtractionLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
    RefCreateArithmethicWorkloadTest<RefMultiplicationFloat32Workload,
                                     MultiplicationQueueDescriptor,
                                     MultiplicationLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
{
    RefCreateArithmethicWorkloadTest<RefMultiplicationUint8Workload,
                                     MultiplicationQueueDescriptor,
                                     MultiplicationLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
{
    RefCreateArithmethicWorkloadTest<RefDivisionFloat32Workload,
                                     DivisionQueueDescriptor,
                                     DivisionLayer,
                                     armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
{
    RefCreateArithmethicWorkloadTest<RefDivisionUint8Workload,
                                     DivisionQueueDescriptor,
                                     DivisionLayer,
                                     armnn::DataType::QuantisedAsymm8>();
}
145
146 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload)
147 {
148     Graph                graph;
149     RefWorkloadFactory factory;
150     auto workload = CreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
151                     (factory, graph);
152
153     // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
154     CheckInputOutput(
155         std::move(workload), TensorInfo({2, 3, 1, 1}, DataType::Float32), TensorInfo({2, 3, 1, 1}, DataType::Float32));
156 }
157
158 BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
159 {
160     Graph                graph;
161     RefWorkloadFactory factory;
162     auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);
163
164     // Checks that outputs and inputs are as we expect them
165     CheckInputOutput(
166         std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
167 }
168
169 BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
170 {
171     Graph                graph;
172     RefWorkloadFactory factory;
173     auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);
174
175     // Checks that outputs and inputs are as we expect them
176     CheckInputOutput(
177         std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
178 }
179
180 static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
181 {
182     Graph graph;
183     RefWorkloadFactory factory;
184     auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dFloat32Workload, DataType::Float32>
185                     (factory, graph, dataLayout);
186
187     std::initializer_list<unsigned int> inputShape  = (dataLayout == DataLayout::NCHW) ?
188         std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
189     std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
190         std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});
191
192     // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
193     CheckInputOutput(std::move(workload),
194                      TensorInfo(inputShape, DataType::Float32),
195                      TensorInfo(outputShape, DataType::Float32));
196 }
197
// Convolution2d is exercised in both supported data layouts (IVGCVSW-1865
// added NHWC support for CpuRef).
BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}
207
208 template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
209 static void RefCreateFullyConnectedWorkloadTest()
210 {
211     Graph graph;
212     RefWorkloadFactory factory;
213     auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
214
215     // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
216     float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
217     float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
218     CheckInputOutput(std::move(workload),
219         TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
220         TensorInfo({ 3, 7 }, DataType, outputQScale));
221 }
222
// Instantiations of the fully-connected workload test for each data type.
BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedUint8Workload, armnn::DataType::QuantisedAsymm8>();
}
232
233 template <typename NormalizationWorkloadType, armnn::DataType DataType>
234 static void RefCreateNormalizationWorkloadTest()
235 {
236     Graph graph;
237     RefWorkloadFactory factory;
238     auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph);
239
240     // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
241     CheckInputOutput(std::move(workload),
242                      TensorInfo({3, 5, 5, 1}, DataType),
243                      TensorInfo({3, 5, 5, 1}, DataType));
244 }
245
// Float32 instantiation of the normalization workload test (NCHW layout).
BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>();
}
250
251 template <typename Pooling2dWorkloadType, armnn::DataType DataType>
252 static void RefCreatePooling2dWorkloadTest()
253 {
254     Graph graph;
255     RefWorkloadFactory factory;
256     auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph);
257
258     // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
259     CheckInputOutput(
260         std::move(workload),
261         TensorInfo({3, 2, 5, 5}, DataType),
262         TensorInfo({3, 2, 2, 4}, DataType));
263 }
264
// Instantiations of the pooling workload test for each data type.
BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>();
}
274
275 template <typename SoftmaxWorkloadType, armnn::DataType DataType>
276 static void RefCreateSoftmaxWorkloadTest()
277 {
278     Graph graph;
279     RefWorkloadFactory factory;
280     auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
281
282     // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
283     CheckInputOutput(
284         std::move(workload),
285         TensorInfo({4, 1}, DataType),
286         TensorInfo({4, 1}, DataType));
287 }
288
// Instantiations of the softmax workload test for each data type.
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxUint8Workload, armnn::DataType::QuantisedAsymm8>();
}
298
299 template <typename SplitterWorkloadType, armnn::DataType DataType>
300 static void RefCreateSplitterWorkloadTest()
301 {
302     Graph graph;
303     RefWorkloadFactory factory;
304     auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);
305
306     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
307     SplitterQueueDescriptor queueDescriptor = workload->GetData();
308     auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
309     BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
310
311     auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
312     BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
313
314     auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]);
315     BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
316
317     auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]);
318     BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
319 }
320
// Instantiations of the splitter workload test for each data type.
BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterUint8Workload, armnn::DataType::QuantisedAsymm8>();
}
330
template <typename SplitterWorkloadType, typename MergerWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterMergerWorkloadTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer.
    // We tested that it is possible to specify the 0th output of the splitter
    // to be the 1st input to the merger, and the 1st output of the splitter
    // to be the 0th input of the merger.

    Graph graph;
    RefWorkloadFactory factory;
    auto workloads = CreateSplitterMergerWorkloadTest<SplitterWorkloadType, MergerWorkloadType, DataType>
        (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    // All four downcasts must succeed before the pointers can be compared.
    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    // Crossed wiring: splitter output 0 feeds merger input 1, and vice versa.
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}
363
// Instantiations of the splitter/merger wiring test for each data type.
BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterFloat32Workload, RefMergerFloat32Workload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterUint8Workload, RefMergerUint8Workload, DataType::QuantisedAsymm8>();
}
373
template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateSingleOutputMultipleInputsTest()
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs; each of those outputs is used by two different activation layers.

    Graph graph;
    RefWorkloadFactory factory;
    std::unique_ptr<SplitterWorkloadType> wlSplitter;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
        ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);

    // Splitter output handles and each activation workload's input handle.
    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);


    // All downcasts must succeed before the pointers can be compared.
    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    // Both activations on each branch must share that branch's splitter output.
    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}
411
// Instantiations of the shared-splitter-output test for each data type.
BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterFloat32Workload, RefActivationFloat32Workload,
        armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterUint8Workload, RefActivationUint8Workload,
        armnn::DataType::QuantisedAsymm8>();
}
423
424 template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
425 static void RefCreateResizeBilinearTest(DataLayout dataLayout)
426 {
427     Graph graph;
428     RefWorkloadFactory factory;
429     auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
430
431     TensorShape inputShape;
432     TensorShape outputShape;
433
434     switch (dataLayout)
435     {
436         case DataLayout::NHWC:
437             inputShape  = { 2, 4, 4, 3 };
438             outputShape = { 2, 2, 2, 3 };
439             break;
440         default: // NCHW
441             inputShape  = { 2, 3, 4, 4 };
442             outputShape = { 2, 3, 2, 2 };
443     }
444
445     // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
446     CheckInputOutput(
447             std::move(workload),
448             TensorInfo(inputShape, DataType),
449             TensorInfo(outputShape, DataType));
450 }
451
// Instantiations of the resize-bilinear test across data types and layouts.
BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}
466
467 template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
468 static void RefCreateL2NormalizationTest(DataLayout dataLayout)
469 {
470     Graph graph;
471     RefWorkloadFactory factory;
472     auto workload =
473             CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
474
475     TensorShape inputShape;
476     TensorShape outputShape;
477
478     switch (dataLayout)
479     {
480         case DataLayout::NHWC:
481             inputShape  = { 5, 50, 67, 20 };
482             outputShape = { 5, 50, 67, 20 };
483             break;
484         case DataLayout::NCHW:
485         default:
486             inputShape  = { 5, 20, 50, 67 };
487             outputShape = { 5, 20, 50, 67 };
488             break;
489     }
490
491     // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
492     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
493 }
494
// Instantiations of the L2 normalization test for both data layouts.
BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
}
504
505 template <typename ReshapeWorkloadType, armnn::DataType DataType>
506 static void RefCreateReshapeWorkloadTest()
507 {
508     Graph graph;
509     RefWorkloadFactory factory;
510     auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
511
512     // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
513     CheckInputOutput(
514         std::move(workload),
515         TensorInfo({ 4, 1 }, DataType),
516         TensorInfo({ 1, 4 }, DataType));
517 }
518
// Instantiations of the reshape workload test for each data type.
BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeUint8Workload, armnn::DataType::QuantisedAsymm8>();
}
528
529 BOOST_AUTO_TEST_SUITE_END()