//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <boost/test/unit_test.hpp>
#include <boost/cast.hpp>

#include <backends/WorkloadData.hpp>
#include <backends/CpuTensorHandle.hpp>

#include <Graph.hpp>

#include <memory>
#include <utility>

using namespace armnn;

// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type.
template<typename Workload>
std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, Graph& graph, const IWorkloadFactory& factory)
{
    std::unique_ptr<IWorkload> workload = layer.CreateWorkload(graph, factory);
    BOOST_TEST(workload.get() == boost::polymorphic_downcast<Workload*>(workload.get()),
               "Cannot convert to derived class");
    std::string reasonIfUnsupported;
    layer.SetComputeDevice(factory.GetCompute());
    BOOST_TEST(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported));
    return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
}

// Connects two layers.
void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
{
    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
    from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
}

// Helper function to create tensor handles for workloads, assuming they all use the same factory.
void CreateTensorHandles(armnn::Graph& graph, armnn::IWorkloadFactory& factory)
{
    for (auto&& layer : graph.TopologicalSort())
    {
        layer->CreateTensorHandles(graph, factory);
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////
// The following functions are called by backends/test/CreateWorkload*.cpp.
// They build very simple graphs, and then create a workload.
// Some checks are performed on the workload to ensure parameters have been passed correctly.
// They return the created workloads so that backend-specific checks can be performed.
/////////////////////////////////////////////////////////////////////////////////////////////

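// A typical call site in a backend test looks like the sketch below. The factory and
// workload types used here (RefWorkloadFactory, RefActivationFloat32Workload) are
// illustrative; each backend passes its own factory and expected workload type:
//
//     armnn::Graph graph;
//     RefWorkloadFactory factory;
//     auto workload = CreateActivationWorkloadTest<RefActivationFloat32Workload,
//                                                  armnn::DataType::Float32>(factory, graph);
//     // ...backend-specific checks on the returned workload...
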
template <typename ActivationWorkload, armnn::DataType DataType>
std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                 armnn::Graph& graph)
{
    // Creates the layer we're testing.
    ActivationDescriptor layerDesc;
    layerDesc.m_Function = ActivationFunction::Abs;
    layerDesc.m_A = 3.5f;
    layerDesc.m_B = -10.0f;

    ActivationLayer* const layer = graph.AddLayer<ActivationLayer>(layerDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({1, 1}, DataType);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, graph, factory);

    ActivationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_A == 3.5f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_B == -10.0f);
    BOOST_TEST((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename WorkloadType,
          typename DescriptorType,
          typename LayerType,
          armnn::DataType DataType>
std::unique_ptr<WorkloadType> CreateArithmeticWorkloadTest(armnn::IWorkloadFactory& factory,
                                                           armnn::Graph& graph)
{
    // Creates the layer we're testing.
    Layer* const layer = graph.AddLayer<LayerType>("layer");

    // Creates extra layers.
    Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
    Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({2, 3}, DataType);
    Connect(input1, layer, tensorInfo, 0, 0);
    Connect(input2, layer, tensorInfo, 0, 1);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, graph, factory);

    DescriptorType queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename BatchNormalizationFloat32Workload, armnn::DataType DataType>
std::unique_ptr<BatchNormalizationFloat32Workload> CreateBatchNormalizationWorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    BatchNormalizationDescriptor layerDesc;
    layerDesc.m_Eps = 0.05f;

    BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");

    armnn::TensorInfo weightInfo({3}, DataType);
    layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
    layer->m_Mean->Allocate();
    layer->m_Variance->Allocate();
    layer->m_Beta->Allocate();
    layer->m_Gamma->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({2, 3, 1, 1}, DataType);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<BatchNormalizationFloat32Workload>(*layer, graph, factory);
    BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
    BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
    BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
    BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                       armnn::Graph& graph,
                                                                       DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 3;
    layerDesc.m_PadRight = 3;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 4;
    layerDesc.m_BiasEnabled = true;
    layerDesc.m_DataLayout = dataLayout;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo(weightShape, DataType));
    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));

    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled);
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
                TensorInfo({2}, GetBiasDataType(DataType))));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename LstmWorkload>
std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // This parameter setting is for withCifgWithPeepholeNoProjection.
    LstmDescriptor layerDesc;
    layerDesc.m_ActivationFunc = 4;
    layerDesc.m_ClippingThresCell = 0.0f;
    layerDesc.m_ClippingThresProj = 0.0f;
    layerDesc.m_CifgEnabled = true;
    layerDesc.m_PeepholeEnabled = true;
    layerDesc.m_ProjectionEnabled = false;

    LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
    unsigned int batchSize = 2;
    unsigned int inputSize = 2;
    unsigned int numUnits = 4;
    unsigned int outputSize = 4;

    layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, inputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits, outputSize }, DataType::Float32));
    layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));
    layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({ numUnits }, DataType::Float32));

    layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
    layer->m_BasicParameters.m_InputToCellWeights->Allocate();
    layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
    layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
    layer->m_BasicParameters.m_ForgetGateBias->Allocate();
    layer->m_BasicParameters.m_CellBias->Allocate();
    layer->m_BasicParameters.m_OutputGateBias->Allocate();

    if (layerDesc.m_PeepholeEnabled)
    {
        layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedCpuTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedCpuTensorHandle>
                (TensorInfo({ numUnits }, DataType::Float32));
        layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
        layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
    }

    // Creates input and output layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
    Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
    Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
    Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
    Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
    Layer* const output = graph.AddLayer<OutputLayer>(3, "output");

    // Connects up.
    armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
    armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 3 }, DataType::Float32);
    if (layerDesc.m_CifgEnabled)
    {
        lstmTensorInfoScratchBuff.SetShape({ batchSize, numUnits * 4 });
    }

    Connect(input, layer, lstmTensorInfo1, 0, 0);
    Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
    Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
    Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
    Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
    Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
    Connect(layer, output, lstmTensorInfo3, 3, 0);

    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, graph, factory);
    LstmQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
    BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 3);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 4);

    BOOST_TEST((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
                                                                                      DataType::Float32)));
    BOOST_TEST((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
                                                                                DataType::Float32)));
    BOOST_TEST((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));

    return workload;
}

template <typename Convolution2dWorkload, armnn::DataType DataType>
std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                             armnn::Graph& graph)
{
    // Creates the layer we're testing.
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 1;
    layerDesc.m_PadRight = 1;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 1;
    layerDesc.m_StrideY = 1;
    layerDesc.m_BiasEnabled = true;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({2}, GetBiasDataType(DataType), inputsQScale));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo({2, 3, 6, 6}, DataType, inputsQScale));
    Connect(layer, output, TensorInfo({2, 2, 6, 6}, DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, graph, factory);

    Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
                                                                        DataType, inputsQScale)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo()
                == TensorInfo({2}, GetBiasDataType(DataType), inputsQScale)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename DepthwiseConvolution2dFloat32Workload>
std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolution2dWorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    DepthwiseConvolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 3;
    layerDesc.m_PadRight = 3;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 4;
    layerDesc.m_BiasEnabled = true;

    DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
    Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, graph, factory);

    DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({3, 3, 5, 3}, DataType::Float32)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({9}, DataType::Float32)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename FullyConnectedWorkload, armnn::DataType DataType>
std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                         armnn::Graph& graph)
{
    // Creates the layer we're testing.
    FullyConnectedDescriptor layerDesc;
    layerDesc.m_BiasEnabled = true;
    layerDesc.m_TransposeWeightMatrix = true;

    FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");

    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale));
    Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, graph, factory);

    FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
    BOOST_TEST(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename NormalizationWorkload, armnn::DataType DataType>
std::unique_ptr<NormalizationWorkload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                       armnn::Graph& graph,
                                                                       DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    NormalizationDescriptor layerDesc;
    layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across;
    layerDesc.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
    layerDesc.m_NormSize = 3;
    layerDesc.m_Alpha = 0.5f;
    layerDesc.m_Beta = -1.0f;
    layerDesc.m_K = 0.2f;
    layerDesc.m_DataLayout = dataLayout;

    NormalizationLayer* layer = graph.AddLayer<NormalizationLayer>(layerDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, layer, TensorInfo({3, 5, 5, 1}, DataType));
    Connect(layer, output, TensorInfo({3, 5, 5, 1}, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<NormalizationWorkload>(*layer, graph, factory);

    NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
    BOOST_TEST((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
    BOOST_TEST(queueDescriptor.m_Parameters.m_NormSize == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_Beta == -1.0f);
    BOOST_TEST(queueDescriptor.m_Parameters.m_K == 0.2f);
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename Pooling2dWorkload, armnn::DataType DataType>
std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory,
                                                               armnn::Graph& graph,
                                                               DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    Pooling2dDescriptor layerDesc;
    layerDesc.m_PoolType = PoolingAlgorithm::Average;
    layerDesc.m_PoolWidth = 3;
    layerDesc.m_PoolHeight = 3;
    layerDesc.m_PadLeft = 2;
    layerDesc.m_PadRight = 2;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 3;
    layerDesc.m_OutputShapeRounding = OutputShapeRounding::Floor;
    layerDesc.m_DataLayout = dataLayout;

    Pooling2dLayer* const layer = graph.AddLayer<Pooling2dLayer>(layerDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2};
    TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2};

    // Connects up.
    Connect(input, layer, TensorInfo(inputShape, DataType));
    Connect(layer, output, TensorInfo(outputShape, DataType));
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, graph, factory);

    Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
    BOOST_TEST((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
    BOOST_TEST(queueDescriptor.m_Parameters.m_PoolWidth == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PoolHeight == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 3);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 2);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);

    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename SoftmaxWorkload, armnn::DataType DataType>
std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
                                                           armnn::Graph& graph)
{
    // Creates the layer we're testing.
    SoftmaxDescriptor softmaxDescriptor;
    Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo tensorInfo({4, 1}, DataType);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, graph, factory);

    SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template<typename SplitterWorkload, armnn::DataType DataType>
std::unique_ptr<SplitterWorkload>
    CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    // NOTE: We need three dimensions (channels, height/y, width/x) because the Compute
    // Library restricts subtensors to have the same x and y dimensions as their parent
    // tensors, so the origin on the x and y dimensions has to be zero for any view.
    // We therefore need a third dimension to split along.
    // NOTE: The ViewsDescriptor arguments are: number of views, number of dimensions.
    ViewsDescriptor layerDesc(3, 3);
    // NOTE: The SetViewOriginCoord arguments are: view, dimension, value.
    layerDesc.SetViewOriginCoord(0, 0, 0);
    layerDesc.SetViewOriginCoord(1, 0, 1);
    layerDesc.SetViewOriginCoord(2, 0, 3);

    Layer* const layer = graph.AddLayer<SplitterLayer>(layerDesc, "layer");

    // Adds extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output0 = graph.AddLayer<OutputLayer>(0, "output0");
    Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
    Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");

    // Connects up.
    armnn::TensorInfo tensorInfo({5, 7, 7}, DataType);
    Connect(input, layer, tensorInfo);

    armnn::TensorInfo output0Info({1, 7, 7}, DataType);
    armnn::TensorInfo output1Info({2, 7, 7}, DataType);
    armnn::TensorInfo output2Info({2, 7, 7}, DataType);

    Connect(layer, output0, output0Info, 0, 0);
    Connect(layer, output1, output1Info, 1, 0);
    Connect(layer, output2, output2Info, 2, 0);

    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, graph, factory);

    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 3);
    BOOST_TEST(queueDescriptor.m_ViewOrigins.size() == 3);

    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

/// This function constructs a graph with both a splitter and a merger, and returns a pair of the workloads.
template<typename SplitterWorkload, typename MergerWorkload, armnn::DataType DataType>
std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<MergerWorkload>>
    CreateSplitterMergerWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType);

    armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 10 }, DataType);
    armnn::TensorInfo splitTensorInfo2({ 1, 1, 100, 10 }, DataType);

    // Constructs the graph.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");

    armnn::ViewsDescriptor splitterViews(2);
    splitterViews.SetViewOriginCoord(0, 0, 0);
    splitterViews.SetViewOriginCoord(0, 1, 0);
    splitterViews.SetViewOriginCoord(0, 2, 0);
    splitterViews.SetViewOriginCoord(0, 3, 0);

    splitterViews.SetViewOriginCoord(1, 0, 0);
    splitterViews.SetViewOriginCoord(1, 1, 1);
    splitterViews.SetViewOriginCoord(1, 2, 0);
    splitterViews.SetViewOriginCoord(1, 3, 0);

    Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
    BOOST_TEST_CHECKPOINT("created splitter layer");

    armnn::OriginsDescriptor mergerViews(2);
    mergerViews.SetViewOriginCoord(0, 0, 0);
    mergerViews.SetViewOriginCoord(0, 1, 1);
    mergerViews.SetViewOriginCoord(0, 2, 0);
    mergerViews.SetViewOriginCoord(0, 3, 0);

    mergerViews.SetViewOriginCoord(1, 0, 0);
    mergerViews.SetViewOriginCoord(1, 1, 0);
    mergerViews.SetViewOriginCoord(1, 2, 0);
    mergerViews.SetViewOriginCoord(1, 3, 0);

    Layer* const merger = graph.AddLayer<MergerLayer>(mergerViews, "merger");
    BOOST_TEST_CHECKPOINT("created merger layer");

    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    Connect(input, splitter, inputTensorInfo, 0, 0);
    BOOST_TEST_CHECKPOINT("connect input to splitter");
    Connect(splitter, merger, splitTensorInfo1, 0, 1); // The splitter & merger are connected up...
    BOOST_TEST_CHECKPOINT("connect splitter[0] to merger[1]");
    Connect(splitter, merger, splitTensorInfo2, 1, 0); // ...so that the outputs are flipped round.
    BOOST_TEST_CHECKPOINT("connect splitter[1] to merger[0]");
    Connect(merger, output, inputTensorInfo, 0, 0);
    BOOST_TEST_CHECKPOINT("connect merger to output");

    CreateTensorHandles(graph, factory);
    BOOST_TEST_CHECKPOINT("created tensor handles");

    auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory);
    BOOST_TEST_CHECKPOINT("created splitter workload");
    auto workloadMerger = MakeAndCheckWorkload<MergerWorkload>(*merger, graph, factory);
    BOOST_TEST_CHECKPOINT("created merger workload");

    return {std::move(workloadSplitter), std::move(workloadMerger)};
}

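// A caller might unpack the returned pair as in this sketch (the Cl* workload types are
// illustrative placeholders for whichever backend is under test):
//
//     auto workloads = CreateSplitterMergerWorkloadTest<ClSplitterFloat32Workload,
//                                                       ClMergerFloat32Workload,
//                                                       armnn::DataType::Float32>(factory, graph);
//     auto wlSplitter = std::move(workloads.first);
//     auto wlMerger   = std::move(workloads.second);
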
/// This function constructs a graph with a splitter with two outputs. Each of the outputs is then
/// connected to two different activation layers.
template<typename SplitterWorkload, typename ActivationWorkload, armnn::DataType DataType>
void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph,
                                                       std::unique_ptr<SplitterWorkload>& wlSplitter,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv0_0,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv0_1,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv1_0,
                                                       std::unique_ptr<ActivationWorkload>& wlActiv1_1)
{
    armnn::TensorInfo inputTensorInfo ({ 1, 3, 100, 50 }, DataType);
    armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 50 }, DataType);
    armnn::TensorInfo splitTensorInfo2({ 1, 2, 100, 50 }, DataType);

    // Constructs the graph.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");

    armnn::ViewsDescriptor splitterViews(2);

    splitterViews.SetViewOriginCoord(0, 0, 0);
    splitterViews.SetViewOriginCoord(0, 1, 0);
    splitterViews.SetViewOriginCoord(0, 2, 0);
    splitterViews.SetViewOriginCoord(0, 3, 0);

    splitterViews.SetViewOriginCoord(1, 0, 0);
    splitterViews.SetViewOriginCoord(1, 1, 1);
    splitterViews.SetViewOriginCoord(1, 2, 0);
    splitterViews.SetViewOriginCoord(1, 3, 0);

    Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");

    armnn::ActivationDescriptor activationDesc;

    Layer* const activ0_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_0");
    Layer* const activ0_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_1");
    Layer* const activ1_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_0");
    Layer* const activ1_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_1");

    Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
    Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");
    Layer* const output3 = graph.AddLayer<OutputLayer>(3, "output3");
    Layer* const output4 = graph.AddLayer<OutputLayer>(4, "output4");

    // Connects up.
    Connect(input, splitter, inputTensorInfo, 0, 0);
    Connect(splitter, activ0_0, splitTensorInfo1, 0, 0);
    Connect(splitter, activ0_1, splitTensorInfo1, 0, 0);

    Connect(splitter, activ1_0, splitTensorInfo2, 1, 0);
    Connect(splitter, activ1_1, splitTensorInfo2, 1, 0);

    Connect(activ0_0, output1, splitTensorInfo1, 0, 0);
    Connect(activ0_1, output2, splitTensorInfo1, 0, 0);
    Connect(activ1_0, output3, splitTensorInfo2, 0, 0);
    Connect(activ1_1, output4, splitTensorInfo2, 0, 0);

    CreateTensorHandles(graph, factory);

    auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, graph, factory);
    auto workloadActiv0_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_0, graph, factory);
    auto workloadActiv0_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_1, graph, factory);
    auto workloadActiv1_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_0, graph, factory);
    auto workloadActiv1_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_1, graph, factory);

    wlSplitter = std::move(workloadSplitter);
    wlActiv0_0 = std::move(workloadActiv0_0);
    wlActiv0_1 = std::move(workloadActiv0_1);
    wlActiv1_0 = std::move(workloadActiv1_0);
    wlActiv1_1 = std::move(workloadActiv1_1);
}

template <typename ResizeBilinearWorkload, armnn::DataType DataType>
std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
                                                                         armnn::Graph& graph,
                                                                         DataLayout dataLayout = DataLayout::NCHW)
{
    TensorShape inputShape;
    TensorShape outputShape;
    unsigned int heightIndex;
    unsigned int widthIndex;

    // Picks the shapes and the height/width dimension indices for the given layout
    // (NHWC: H=1, W=2; NCHW: H=2, W=3).
    switch (dataLayout)
    {
        case DataLayout::NHWC:
            inputShape  = { 2, 4, 4, 3 };
            outputShape = { 2, 2, 2, 3 };
            heightIndex = 1;
            widthIndex = 2;
            break;
        default: // NCHW
            inputShape  = { 2, 3, 4, 4 };
            outputShape = { 2, 3, 2, 2 };
            heightIndex = 2;
            widthIndex = 3;
    }

    // Creates the layer we're testing.
    ResizeBilinearDescriptor resizeDesc;
    resizeDesc.m_TargetWidth = outputShape[widthIndex];
    resizeDesc.m_TargetHeight = outputShape[heightIndex];
    resizeDesc.m_DataLayout = dataLayout;
    Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ResizeBilinearWorkload>(*layer, graph, factory);

    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename L2NormalizationWorkload, armnn::DataType DataType>
std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
    armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
{
    // Creates the layer we're testing.
    L2NormalizationDescriptor layerDesc;
    layerDesc.m_DataLayout = dataLayout;

    Layer* const layer = graph.AddLayer<L2NormalizationLayer>(layerDesc, "l2norm");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({ 5, 20, 50, 67 }, DataType);
    armnn::TensorInfo outputTensorInfo({ 5, 20, 50, 67 }, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, graph, factory);

    L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename ReshapeWorkload, armnn::DataType DataType>
std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory,
                                                           armnn::Graph& graph)
{
    // Creates the layer we're testing.
    TensorShape outputShape({ 1, 4 });
    ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputShape;
    Layer* const layer = graph.AddLayer<ReshapeLayer>(reshapeDesc, "layer");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({ 4, 1 }, DataType);
    armnn::TensorInfo outputTensorInfo(outputShape, DataType);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, graph, factory);

    ReshapeQueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename ConvertFp16ToFp32Float32Workload>
std::unique_ptr<ConvertFp16ToFp32Float32Workload> CreateConvertFp16ToFp32WorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    ConvertFp16ToFp32Layer* const layer = graph.AddLayer<ConvertFp16ToFp32Layer>("Fp16ToFp32Converter");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, graph, factory);

    ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}

template <typename ConvertFp32ToFp16Float16Workload>
std::unique_ptr<ConvertFp32ToFp16Float16Workload> CreateConvertFp32ToFp16WorkloadTest(
    armnn::IWorkloadFactory& factory, armnn::Graph& graph)
{
    // Creates the layer we're testing.
    ConvertFp32ToFp16Layer* const layer = graph.AddLayer<ConvertFp32ToFp16Layer>("Fp32ToFp16Converter");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
    Connect(input, layer, inputTensorInfo);
    Connect(layer, output, outputTensorInfo);
    CreateTensorHandles(graph, factory);

    // Makes the workload and checks it.
    auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, graph, factory);

    ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

    // Returns so we can do extra, backend-specific tests.
    return workload;
}