//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <boost/test/unit_test.hpp>

#include <boost/cast.hpp>

#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <Graph.hpp>

#include <memory>
#include <string>
#include <utility>

using namespace armnn;

namespace
{

using namespace std;

// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type.
template<typename Workload>
std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, Graph& graph, const IWorkloadFactory& factory)
{
    std::unique_ptr<IWorkload> workload = layer.CreateWorkload(graph, factory);
    BOOST_TEST(workload.get() == boost::polymorphic_downcast<Workload*>(workload.get()),
               "Cannot convert to derived class");
    std::string reasonIfUnsupported;
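    // The layer must have a backend assigned before the factory can be asked
    // whether it supports the layer.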
    layer.SetBackendId(factory.GetBackendId());
    BOOST_TEST(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported));
    return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
}

// Connects two layers.
void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
{
    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
    from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
}

// Helper function to create tensor handles for workloads, assuming they all use the same factory.
void CreateTensorHandles(armnn::Graph& graph, armnn::IWorkloadFactory& factory)
{
    for (auto&& layer : graph.TopologicalSort())
    {
        layer->CreateTensorHandles(graph, factory);
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////
// The following functions are called by backendsCommon/test/CreateWorkload*.cpp
// They build very simple graphs, and then create a workload.
// Some checks are performed on the workload to ensure parameters have been passed correctly.
// They return the created workloads so that backend-specific checks can be performed.
/////////////////////////////////////////////////////////////////////////////////////////////

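// Example call site from a backend test (illustrative sketch only; the factory and
// workload types below are hypothetical stand-ins for the backend under test):
//
//     armnn::Graph graph;
//     MyBackendWorkloadFactory factory;
//     auto workload = CreateActivationWorkloadTest<MyBackendActivationWorkload,
//                                                  armnn::DataType::Float32>(factory, graph);
//     // ... additional backend-specific checks on the returned workload ...
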
template <typename ActivationWorkload, armnn::DataType DataType>
std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                 armnn::Graph&            graph)
{
    // Creates the layer we're testing.
    ActivationDescriptor layerDesc;
    layerDesc.m_Function = ActivationFunction::Abs;
    layerDesc.m_A        = 3.5f;
    layerDesc.m_B        = -10.0f;

    ActivationLayer* const layer = graph.AddLayer<ActivationLayer>(layerDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({1, 1}, DataType);

    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);

    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, graph, factory);

    ActivationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_A == 3.5f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_B == -10.0f);
    BOOST_TEST((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
std::unique_ptr<WorkloadType> CreateArithmeticWorkloadTest(armnn::IWorkloadFactory& factory,
                                                           armnn::Graph&            graph)
{
    // Creates the layer we're testing.
    Layer* const layer = graph.AddLayer<LayerType>("layer");

    // Creates extra layers.
    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
    Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({2, 3}, DataType);
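    // Both inputs share one shape, so this test does not exercise broadcasting.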
    Connect(input1, layer, tensorInfo, 0, 0);
    Connect(input2, layer, tensorInfo, 0, 1);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, graph, factory);

    DescriptorType queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename BatchNormalizationFloat32Workload, armnn::DataType DataType>
std::unique_ptr<BatchNormalizationFloat32Workload> CreateBatchNormalizationWorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayoutIndexed dataLayout = DataLayout::NCHW)
{
    TensorShape tensorShape;
    switch (dataLayout.GetDataLayout())
    {
        case DataLayout::NHWC:
            tensorShape = { 2, 4, 4, 3 };
            break;
        case DataLayout::NCHW:
        default:
            tensorShape = { 2, 3, 4, 4 };
    }

    // Creates the layer we're testing.
    BatchNormalizationDescriptor layerDesc;
    layerDesc.m_Eps = 0.05f;
    layerDesc.m_DataLayout = dataLayout;

    BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");

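    // Mean, variance, beta and gamma are per-channel tensors, so their shape
    // matches the channel count (3) of the input.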
    armnn::TensorInfo weightInfo({3}, DataType);
    layer->m_Mean     = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Beta     = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Gamma    = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Mean->Allocate();
    layer->m_Variance->Allocate();
    layer->m_Beta->Allocate();
    layer->m_Gamma->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo(tensorShape, DataType);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<BatchNormalizationFloat32Workload>(*layer, graph, factory);
    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
    BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
    BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
    BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout.GetDataLayout() == dataLayout));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                       armnn::Graph&            graph,
                                                                       DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 3;
    layerDesc.m_PadRight = 3;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 4;
    layerDesc.m_BiasEnabled = true;
    layerDesc.m_DataLayout = dataLayout;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

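    // Shapes are [N, C, H, W] for NCHW and [N, H, W, C] for NHWC; the weights use
    // the same ordering with the output channel count first.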
    TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
    layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));

    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled);
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
        TensorInfo({2}, GetBiasDataType(DataType))));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename LstmWorkload>
std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // This parameter setting is for withCifgWithPeepholeNoProjection.
    LstmDescriptor layerDesc;
    layerDesc.m_ActivationFunc = 4;
    layerDesc.m_ClippingThresCell = 0.0f;
    layerDesc.m_ClippingThresProj = 0.0f;
    layerDesc.m_CifgEnabled = true;
    layerDesc.m_PeepholeEnabled = true;
    layerDesc.m_ProjectionEnabled = false;

    LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
    unsigned int batchSize = 2;
    unsigned int inputSize = 2;
    unsigned int numUnits = 4;
    unsigned int outputSize = 4;

    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));

    layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
    layer->m_BasicParameters.m_InputToCellWeights->Allocate();
    layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
    layer->m_BasicParameters.m_ForgetGateBias->Allocate();
    layer->m_BasicParameters.m_CellBias->Allocate();
    layer->m_BasicParameters.m_OutputGateBias->Allocate();

    if (layerDesc.m_PeepholeEnabled)
    {
        layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
        layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
    }

    // Creates input and output layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
    Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
    Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
    Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
    Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
    Layer* const output = graph.AddLayer<OutputLayer>(3, "output");

    // Connects up.
    armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
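    // The scratch buffer covers the intermediate gate results: 4 * numUnits normally
    // (input, forget, cell and output gates), but only 3 * numUnits with CIFG, since
    // the input gate is then derived from the forget gate.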
    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits*4 }, DataType::Float32);
    if (layerDesc.m_CifgEnabled)
    {
        lstmTensorInfoScratchBuff.SetShape({ batchSize, numUnits*3 });
    }

    Connect(input, layer, lstmTensorInfo1, 0, 0);
    Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
    Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
    Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
    Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
    Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
    Connect(layer, output, lstmTensorInfo3, 3, 0);

    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, graph, factory);
    LstmQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
    BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 3);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 4);

    BOOST_TEST((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
                                                                                      DataType::Float32)));
    BOOST_TEST((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
                                                                                DataType::Float32)));
    BOOST_TEST((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));
    return workload;
}

template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                             armnn::Graph&            graph)
{
    // Creates the layer we're testing.
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 1;
    layerDesc.m_PadRight = 1;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 1;
    layerDesc.m_StrideY = 1;
    layerDesc.m_BiasEnabled = true;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

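    // Quantisation scales are only meaningful for QuantisedAsymm8; for other data
    // types the scale is unused, so 0.0f serves as a placeholder.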
    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
    layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>
        (TensorInfo({2},  GetBiasDataType(DataType), inputsQScale));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo({2, 3, 6, 6}, DataType, inputsQScale));
    Connect(layer, output, TensorInfo({2, 2, 6, 6}, DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
        DataType, inputsQScale)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo()
                == TensorInfo({2},  GetBiasDataType(DataType), inputsQScale)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename DepthwiseConvolution2dFloat32Workload, armnn::DataType DataType>
std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolution2dWorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    DepthwiseConvolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft         = 1;
    layerDesc.m_PadRight        = 2;
    layerDesc.m_PadTop          = 1;
    layerDesc.m_PadBottom       = 2;
    layerDesc.m_StrideX         = 1;
    layerDesc.m_StrideY         = 1;
    layerDesc.m_BiasEnabled     = false;
    layerDesc.m_DataLayout      = dataLayout;

    DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({1, 4, 4, 2}, DataType));
    layer->m_Weight->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
                TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 };
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
                TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 };

    // Connects up.
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, graph, factory);

    DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == false);
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 4, 4, 2}, DataType)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename FullyConnectedWorkload, armnn::DataType DataType>
std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                         armnn::Graph&            graph)
{
    // Creates the layer we're testing.
    FullyConnectedDescriptor layerDesc;
    layerDesc.m_BiasEnabled = true;
    layerDesc.m_TransposeWeightMatrix = true;

    FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");

    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;

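    // With m_TransposeWeightMatrix set, the weights are stored as [outputUnits, inputUnits];
    // the { 3, 1, 4, 5 } input below is flattened to { 3, 20 }, giving a { 3, 7 } output.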
    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
    layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale));
    Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, graph, factory);

    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
    BOOST_TEST(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename NormalizationWorkload, armnn::DataType DataType>
std::unique_ptr<NormalizationWorkload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                       armnn::Graph& graph,
                                                                       DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    NormalizationDescriptor layerDesc;
    layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across;
    layerDesc.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
    layerDesc.m_NormSize = 3;
    layerDesc.m_Alpha = 0.5f;
    layerDesc.m_Beta = -1.0f;
    layerDesc.m_K = 0.2f;
    layerDesc.m_DataLayout = dataLayout;

    NormalizationLayer* layer = graph.AddLayer<NormalizationLayer>(layerDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
                TensorShape{ 3, 1, 5, 5 } : TensorShape{ 3, 5, 5, 1 };
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
                TensorShape{ 3, 1, 5, 5 } : TensorShape{ 3, 5, 5, 1 };

    // Connects up.
    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<NormalizationWorkload>(*layer, graph, factory);

    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
    BOOST_TEST((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
    BOOST_TEST(queueDescriptor.m_Parameters.m_NormSize == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_Beta == -1.0f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_K == 0.2f);
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename Pooling2dWorkload, armnn::DataType DataType>
std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                               armnn::Graph&            graph,
                                                               DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    Pooling2dDescriptor layerDesc;
    layerDesc.m_PoolType = PoolingAlgorithm::Average;
    layerDesc.m_PoolWidth = 3;
    layerDesc.m_PoolHeight = 3;
    layerDesc.m_PadLeft = 2;
    layerDesc.m_PadRight = 2;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 3;
    layerDesc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    layerDesc.m_DataLayout = dataLayout;

    Pooling2dLayer* const layer = graph.AddLayer<Pooling2dLayer>(layerDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2};

    // Connects up.
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, graph, factory);

    Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
    BOOST_TEST((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
    BOOST_TEST(queueDescriptor.m_Parameters.m_PoolWidth == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PoolHeight == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename SoftmaxWorkload, armnn::DataType DataType>
std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
                                                           armnn::Graph&            graph)
{
    // Creates the layer we're testing.
    SoftmaxDescriptor softmaxDescriptor;
    Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({4, 1}, DataType);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, graph, factory);

    SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template<typename SplitterWorkload, armnn::DataType DataType>
std::unique_ptr<SplitterWorkload>
    CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    // NOTE: we need three dimensions (channels, height/y, width/x) because the Compute
    //       Library restricts subtensors to have the same x and y dimensions as
    //       their parent tensors, and therefore the origin on the x and y dimension
    //       has to be zero for any view. So we need a third dimension to split on.
    // NOTE: arguments are: number of views, number of dimensions.
    ViewsDescriptor layerDesc(3, 3);
    // NOTE: arguments are: view, dimension, value.
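    // The three views start at channels 0, 1 and 3 of the 5-channel input,
    // giving view sizes of 1, 2 and 2 along dimension 0.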
    layerDesc.SetViewOriginCoord(0, 0, 0);
    layerDesc.SetViewOriginCoord(1, 0, 1);
    layerDesc.SetViewOriginCoord(2, 0, 3);

    Layer* const layer = graph.AddLayer<SplitterLayer>(layerDesc, "layer");

    // Adds extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output0 = graph.AddLayer<OutputLayer>(0, "output0");
    Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
    Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");

    // Connects up.
    armnn::TensorInfo tensorInfo({5, 7, 7}, DataType);
    Connect(input, layer, tensorInfo);

    armnn::TensorInfo output0Info({1, 7, 7}, DataType);
    armnn::TensorInfo output1Info({2, 7, 7}, DataType);
    armnn::TensorInfo output2Info({2, 7, 7}, DataType);

    Connect(layer, output0, output0Info, 0, 0);
    Connect(layer, output1, output1Info, 1, 0);
    Connect(layer, output2, output2Info, 2, 0);

    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, graph, factory);

    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 3);
    BOOST_TEST(queueDescriptor.m_ViewOrigins.size() == 3);

    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

/// This function constructs a graph with both a splitter and a merger, and returns a pair of the workloads.
template<typename SplitterWorkload, typename MergerWorkload, armnn::DataType DataType>
std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<MergerWorkload>>
    CreateSplitterMergerWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType);

    armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 10 }, DataType);
    armnn::TensorInfo splitTensorInfo2({ 1, 1, 100, 10 }, DataType);

    // Constructs the graph.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");

    armnn::ViewsDescriptor splitterViews(2);
    splitterViews.SetViewOriginCoord(0, 0, 0);
    splitterViews.SetViewOriginCoord(0, 1, 0);
    splitterViews.SetViewOriginCoord(0, 2, 0);
    splitterViews.SetViewOriginCoord(0, 3, 0);

    splitterViews.SetViewOriginCoord(1, 0, 0);
    splitterViews.SetViewOriginCoord(1, 1, 1);
    splitterViews.SetViewOriginCoord(1, 2, 0);
    splitterViews.SetViewOriginCoord(1, 3, 0);

    Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
    BOOST_TEST_CHECKPOINT("created splitter layer");

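    // The merger's view origins compensate for the crossed connections made below,
    // so the merged output ends up back in the original channel order.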
    armnn::OriginsDescriptor mergerViews(2);
    mergerViews.SetViewOriginCoord(0, 0, 0);
    mergerViews.SetViewOriginCoord(0, 1, 1);
    mergerViews.SetViewOriginCoord(0, 2, 0);
    mergerViews.SetViewOriginCoord(0, 3, 0);

    mergerViews.SetViewOriginCoord(1, 0, 0);
    mergerViews.SetViewOriginCoord(1, 1, 0);
    mergerViews.SetViewOriginCoord(1, 2, 0);
    mergerViews.SetViewOriginCoord(1, 3, 0);

    Layer* const merger = graph.AddLayer<MergerLayer>(mergerViews, "merger");
    BOOST_TEST_CHECKPOINT("created merger layer");

    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Adds connections.
    Connect(input, splitter, inputTensorInfo, 0, 0);
    BOOST_TEST_CHECKPOINT("connect input to splitter");
    Connect(splitter, merger, splitTensorInfo1, 0, 1); // The splitter & merger are connected up.
    BOOST_TEST_CHECKPOINT("connect splitter[0] to merger[1]");
    Connect(splitter, merger, splitTensorInfo2, 1, 0); // So that the outputs are flipped round.
    BOOST_TEST_CHECKPOINT("connect splitter[1] to merger[0]");
    Connect(merger, output, inputTensorInfo, 0, 0);
    BOOST_TEST_CHECKPOINT("connect merger to output");

    CreateTensorHandles(graph, factory);
    BOOST_TEST_CHECKPOINT("created tensor handles");

    auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory);
    BOOST_TEST_CHECKPOINT("created splitter workload");
    auto workloadMerger = MakeAndCheckWorkload<MergerWorkload>(*merger, graph, factory);
    BOOST_TEST_CHECKPOINT("created merger workload");

    return {std::move(workloadSplitter), std::move(workloadMerger)};
}

/// This function constructs a graph with a splitter that has two outputs. Each of the outputs is then
/// connected to two different activation layers.
template<typename SplitterWorkload, typename ActivationWorkload, armnn::DataType DataType>
void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph,
                                 std::unique_ptr<SplitterWorkload>& wlSplitter,
                                 std::unique_ptr<ActivationWorkload>& wlActiv0_0,
                                 std::unique_ptr<ActivationWorkload>& wlActiv0_1,
                                 std::unique_ptr<ActivationWorkload>& wlActiv1_0,
                                 std::unique_ptr<ActivationWorkload>& wlActiv1_1)
{
    armnn::TensorInfo inputTensorInfo ({ 1, 3, 100, 50 }, DataType);
    armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 50 }, DataType);
    armnn::TensorInfo splitTensorInfo2({ 1, 2, 100, 50 }, DataType);

    // Constructs the graph.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");

    armnn::ViewsDescriptor splitterViews(2);

    splitterViews.SetViewOriginCoord(0, 0, 0);
    splitterViews.SetViewOriginCoord(0, 1, 0);
    splitterViews.SetViewOriginCoord(0, 2, 0);
    splitterViews.SetViewOriginCoord(0, 3, 0);

    splitterViews.SetViewOriginCoord(1, 0, 0);
    splitterViews.SetViewOriginCoord(1, 1, 1);
    splitterViews.SetViewOriginCoord(1, 2, 0);
    splitterViews.SetViewOriginCoord(1, 3, 0);

    Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");

    armnn::ActivationDescriptor activationDesc;

    Layer* const activ0_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_0");
    Layer* const activ0_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_1");
    Layer* const activ1_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_0");
    Layer* const activ1_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_1");

    Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
    Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");
    Layer* const output3 = graph.AddLayer<OutputLayer>(3, "output3");
    Layer* const output4 = graph.AddLayer<OutputLayer>(4, "output4");

    // Adds connections.
    Connect(input, splitter, inputTensorInfo, 0, 0);
    Connect(splitter, activ0_0, splitTensorInfo1, 0, 0);
    Connect(splitter, activ0_1, splitTensorInfo1, 0, 0);

    Connect(splitter, activ1_0, splitTensorInfo2, 1, 0);
    Connect(splitter, activ1_1, splitTensorInfo2, 1, 0);

    Connect(activ0_0, output1, splitTensorInfo1, 0, 0);
    Connect(activ0_1, output2, splitTensorInfo1, 0, 0);
    Connect(activ1_0, output3, splitTensorInfo2, 0, 0);
    Connect(activ1_1, output4, splitTensorInfo2, 0, 0);

    CreateTensorHandles(graph, factory);

    auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory);
    auto workloadActiv0_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_0, graph, factory);
    auto workloadActiv0_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_1, graph, factory);
    auto workloadActiv1_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_0, graph, factory);
    auto workloadActiv1_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_1, graph, factory);

    wlSplitter = std::move(workloadSplitter);
    wlActiv0_0 = std::move(workloadActiv0_0);
    wlActiv0_1 = std::move(workloadActiv0_1);
    wlActiv1_0 = std::move(workloadActiv1_0);
    wlActiv1_1 = std::move(workloadActiv1_1);
}

template <typename ResizeBilinearWorkload, armnn::DataType DataType>
std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                         armnn::Graph& graph,
                                                                         DataLayoutIndexed dataLayout =
                                                                             DataLayout::NCHW)
{
    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout.GetDataLayout())
    {
        case DataLayout::NHWC:
            inputShape =  { 2, 4, 4, 3 };
            outputShape = { 2, 2, 2, 3 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape =  { 2, 3, 4, 4 };
            outputShape = { 2, 3, 2, 2 };
    }

    // Creates the layer we're testing.
    ResizeBilinearDescriptor resizeDesc;
    resizeDesc.m_TargetWidth = outputShape[dataLayout.GetWidthIndex()];
    resizeDesc.m_TargetHeight = outputShape[dataLayout.GetHeightIndex()];
    resizeDesc.m_DataLayout = dataLayout;
    Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ResizeBilinearWorkload>(*layer, graph, factory);

    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout.GetDataLayout() == dataLayout));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename L2NormalizationWorkload, armnn::DataType DataType>
std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
    armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    L2NormalizationDescriptor layerDesc;
    layerDesc.m_DataLayout = dataLayout;

    Layer* const layer = graph.AddLayer<L2NormalizationLayer>(layerDesc, "l2norm");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
                TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
                TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };

    // Connects up.
    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, graph, factory);

    L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename ReshapeWorkload, armnn::DataType DataType>
std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory,
    armnn::Graph& graph)
{
    // Creates the layer we're testing.
    TensorShape outputShape({ 1, 4 });
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputShape;
    Layer* const layer = graph.AddLayer<ReshapeLayer>(reshapeDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({ 4, 1 }, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, graph, factory);

    ReshapeQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename ConvertFp16ToFp32Float32Workload>
std::unique_ptr<ConvertFp16ToFp32Float32Workload> CreateConvertFp16ToFp32WorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    ConvertFp16ToFp32Layer* const layer = graph.AddLayer<ConvertFp16ToFp32Layer>("Fp16ToFp32Converter");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, graph, factory);

    ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename ConvertFp32ToFp16Float16Workload>
std::unique_ptr<ConvertFp32ToFp16Float16Workload> CreateConvertFp32ToFp16WorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    ConvertFp32ToFp16Layer* const layer = graph.AddLayer<ConvertFp32ToFp16Layer>("Fp32ToFp16Converter");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, graph, factory);

    ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename MeanWorkload, armnn::DataType DataType>
std::unique_ptr<MeanWorkload> CreateMeanWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Reduce along the first and second dimensions, and do not keep the reduced dimensions.
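    // Reducing axes 1 and 2 of the { 1, 3, 7, 4 } input below yields a { 1, 4 } output.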
    MeanDescriptor descriptor({ 1, 2 }, false);

    // Creates the layer we're testing.
    Layer* const layer = graph.AddLayer<MeanLayer>(descriptor, "mean");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({ 1, 3, 7, 4 }, DataType);
    armnn::TensorInfo outputTensorInfo({ 1, 4 }, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<MeanWorkload>(*layer, graph, factory);

    MeanQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis);
    BOOST_TEST(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

}