caf5e588f761202b0bb4090989ed85738ece6e64
[platform/upstream/armnn.git] / src / backends / backendsCommon / test / IsLayerSupportedTestImpl.hpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6
7 #include <Graph.hpp>
8
9 #include <backendsCommon/WorkloadFactory.hpp>
10
11 #include <boost/core/ignore_unused.hpp>
12
13 namespace
14 {
// Shared scratch graph: every DummyLayer adds itself to this graph on
// construction and erases itself again on destruction.
armnn::Graph dummyGraph;
16
17 // Make a dummy TensorInfo object.
18 template<armnn::DataType DataType>
19 armnn::TensorInfo MakeDummyTensorInfo()
20 {
21     return armnn::TensorInfo({2,2,2,2}, DataType);
22 }
23
24
25 // Make a dummy WorkloadInfo using a dummy TensorInfo.
26 template<armnn::DataType DataType>
27 armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
28 {
29     armnn::WorkloadInfo info;
30
31     for (unsigned int i=0; i < numInputs; i++)
32     {
33         info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
34     }
35
36     for (unsigned int o=0; o < numOutputs; o++)
37     {
38         info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
39     }
40
41     return info;
42 }
43
44 // Template class to create a dummy layer (2 parameters).
45 template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
46 struct DummyLayer
47 {
48     DummyLayer()
49     {
50         m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
51     }
52
53     ~DummyLayer()
54     {
55         dummyGraph.EraseLayer(m_Layer);
56     }
57
58     LayerType* m_Layer;
59 };
60
61 // Template class to create a dummy layer (1 parameter).
62 template<typename LayerType>
63 struct DummyLayer<LayerType, void>
64 {
65     DummyLayer()
66     {
67         m_Layer = dummyGraph.AddLayer<LayerType>("");
68     }
69
70     ~DummyLayer()
71     {
72         dummyGraph.EraseLayer(m_Layer);
73     }
74
75     LayerType* m_Layer;
76 };
77
78 template<>
79 struct DummyLayer<armnn::BatchNormalizationLayer>
80 {
81     DummyLayer()
82     {
83         m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
84         m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
85             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
86         m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
87             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
88         m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
89             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
90         m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
91             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
92     }
93
94     ~DummyLayer()
95     {
96         dummyGraph.EraseLayer(m_Layer);
97     }
98
99     armnn::BatchNormalizationLayer* m_Layer;
100 };
101
102 template<>
103 struct DummyLayer<armnn::BatchToSpaceNdLayer>
104 {
105     DummyLayer()
106     {
107         m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
108     }
109
110     ~DummyLayer()
111     {
112         dummyGraph.EraseLayer(m_Layer);
113     }
114
115     armnn::BatchToSpaceNdLayer* m_Layer;
116 };
117
118 template<>
119 struct DummyLayer<armnn::ConstantLayer, void>
120 {
121     DummyLayer()
122     {
123         m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
124     }
125
126     ~DummyLayer()
127     {
128         dummyGraph.EraseLayer(m_Layer);
129     }
130
131     armnn::ConstantLayer* m_Layer;
132 };
133
134 template<>
135 struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
136 {
137     DummyLayer()
138     {
139         m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
140     }
141
142     ~DummyLayer()
143     {
144         dummyGraph.EraseLayer(m_Layer);
145     }
146
147     armnn::InputLayer* m_Layer;
148 };
149
150 template<>
151 struct DummyLayer<armnn::ConcatLayer>
152 {
153     DummyLayer()
154     {
155         armnn::OriginsDescriptor desc(2);
156         m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
157     }
158
159     ~DummyLayer()
160     {
161         dummyGraph.EraseLayer(m_Layer);
162     }
163
164     armnn::ConcatLayer* m_Layer;
165 };
166
167 template<>
168 struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
169 {
170     DummyLayer()
171     {
172         m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
173     }
174
175     ~DummyLayer()
176     {
177         dummyGraph.EraseLayer(m_Layer);
178     }
179
180     armnn::OutputLayer* m_Layer;
181 };
182
183 template<>
184 struct DummyLayer<armnn::SplitterLayer>
185 {
186     DummyLayer()
187     {
188         armnn::ViewsDescriptor desc(1);
189         m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
190     }
191
192     ~DummyLayer()
193     {
194         dummyGraph.EraseLayer(m_Layer);
195     }
196
197     armnn::SplitterLayer* m_Layer;
198 };
199
200 template <typename ConvolutionLayerType>
201 struct DummyConvolutionLayer
202 {
203     DummyConvolutionLayer()
204     {
205         typename ConvolutionLayerType::DescriptorType desc;
206         m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
207         m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
208             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
209         m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
210             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
211     }
212
213     ~DummyConvolutionLayer()
214     {
215         dummyGraph.EraseLayer(m_Layer);
216     }
217
218     ConvolutionLayerType* m_Layer;
219 };
220
// The convolution layer variants below all reuse DummyConvolutionLayer,
// which supplies the dummy weight/bias tensors they require.
template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{
};
238
239 template <typename LstmLayerType>
240 struct DummyLstmLayer
241 {
242     DummyLstmLayer()
243     {
244         typename LstmLayerType::DescriptorType desc;
245         desc.m_CifgEnabled = false;
246
247         m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
248         m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedCpuTensorHandle>(
249                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
250         m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedCpuTensorHandle>(
251                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
252         m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedCpuTensorHandle>(
253                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
254         m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
255                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
256         m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedCpuTensorHandle>(
257                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
258         m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
259                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
260         m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedCpuTensorHandle>(
261                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
262         m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedCpuTensorHandle>(
263                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
264         m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedCpuTensorHandle>(
265                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
266
267         m_Layer->m_CifgParameters.m_InputToInputWeights        = std::make_unique<armnn::ScopedCpuTensorHandle>(
268                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
269         m_Layer->m_CifgParameters.m_RecurrentToInputWeights    = std::make_unique<armnn::ScopedCpuTensorHandle>(
270                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
271         m_Layer->m_CifgParameters.m_CellToInputWeights         = std::make_unique<armnn::ScopedCpuTensorHandle>(
272                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
273         m_Layer->m_CifgParameters.m_InputGateBias              = std::make_unique<armnn::ScopedCpuTensorHandle>(
274                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
275     }
276
277     ~DummyLstmLayer()
278     {
279         dummyGraph.EraseLayer(m_Layer);
280     }
281
282     armnn::LstmLayer* m_Layer;
283 };
284
// LSTM reuses DummyLstmLayer, which supplies the dummy weights and biases.
template<>
struct DummyLayer<armnn::LstmLayer>
        : public DummyLstmLayer<armnn::LstmLayer>
{
};
290
291 template<>
292 struct DummyLayer<armnn::QuantizedLstmLayer, void>
293 {
294     DummyLayer()
295     {
296         m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
297
298         m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights  = std::make_unique<armnn::ScopedCpuTensorHandle>(
299                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
300         m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
301                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
302         m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights   = std::make_unique<armnn::ScopedCpuTensorHandle>(
303                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
304         m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
305                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
306
307         m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights  = std::make_unique<armnn::ScopedCpuTensorHandle>(
308                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
309         m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
310                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
311         m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedCpuTensorHandle>(
312                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
313         m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
314                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
315
316         m_Layer->m_QuantizedLstmParameters.m_InputGateBias  = std::make_unique<armnn::ScopedCpuTensorHandle>(
317                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
318         m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
319                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
320         m_Layer->m_QuantizedLstmParameters.m_CellBias       = std::make_unique<armnn::ScopedCpuTensorHandle>(
321                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
322         m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
323                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
324     }
325
326     ~DummyLayer()
327     {
328         dummyGraph.EraseLayer(m_Layer);
329     }
330
331     armnn::QuantizedLstmLayer* m_Layer;
332 };
333
334 template<>
335 struct DummyLayer<armnn::FullyConnectedLayer>
336 {
337     DummyLayer()
338     {
339         armnn::FullyConnectedLayer::DescriptorType desc;
340         m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
341         m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
342             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
343     }
344
345     ~DummyLayer()
346     {
347         dummyGraph.EraseLayer(m_Layer);
348     }
349
350     armnn::FullyConnectedLayer* m_Layer;
351 };
352
// Tag for giving LayerType entries a unique strong type each.
// Drives overload resolution in the compile-time recursion over the
// armnn::LayerType enum (see IsLayerSupportedTestsImpl below).
template<armnn::LayerType>
struct Tag{};
356
// Declares a LayerTypePolicy specialization for layer type 'name' whose
// graph constructor takes a custom second parameter of type 'descType'
// (e.g. armnn::LayerBindingId for Input/Output layers).
// The policy exposes the layer / descriptor / queue-descriptor types, the
// layer name, and a MakeDummyWorkload() helper that asks the workload
// factory to create the corresponding workload from dummy tensors.
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};
374
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name only).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
382
// Layer policy template. Intentionally left undefined: only the
// specializations generated by the DECLARE_LAYER_POLICY_* macros exist,
// so referencing an unlisted LayerType is a compile-time error.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
386
// Every entry in the armnn::LayerType enum must be accounted for below.
// (The list is not strictly alphabetical; what matters is that each enum
// entry has exactly one policy so the compile-time recursion covers all.)
DECLARE_LAYER_POLICY_1_PARAM(Abs)

DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_1_PARAM(Equal)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_1_PARAM(Gather)

DECLARE_LAYER_POLICY_1_PARAM(Greater)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)
491
492
// Generic implementation to get the number of input slots for a given layer type;
// simply reads the count off the constructed dummy layer.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type;
// simply reads the count off the constructed dummy layer.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}
506
// Concat always reports 2 inputs, matching the OriginsDescriptor(2) used by
// DummyLayer<armnn::ConcatLayer> above.
template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    boost::ignore_unused(layer);
    return 2;
}
513
514 // Tests that the IsLayerSupported() function returns the correct value.
515 // We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
516 // Returns true if expectations are met, otherwise returns false.
517 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
518 bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
519 {
520     using LayerPolicy = LayerTypePolicy<Type, DataType>;
521     using LayerType = typename LayerPolicy::Type;
522     using LayerDesc = typename LayerPolicy::Desc;
523     DummyLayer<LayerType, LayerDesc> layer;
524
525     unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
526     unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
527
528     // Make another dummy layer just to make IsLayerSupported have valid inputs.
529     DummyLayer<armnn::ConstantLayer, void> previousLayer;
530     // Set output of the previous layer to a dummy tensor.
531     armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
532     previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
533     // Connect all outputs of the previous layer to inputs of tested layer.
534     for (unsigned int i = 0; i < numIn; i++)
535     {
536         armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
537         armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
538         previousLayerOutputSlot.Connect(layerInputSlot);
539     }
540     // Set outputs of tested layer to a dummy tensor.
541     for (unsigned int i = 0; i < numOut; i++)
542     {
543         layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
544     }
545
546     std::string layerName = LayerPolicy::NameStr;
547     std::string reasonIfUnsupported;
548     if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
549     {
550         std::string errorMsg = " layer expected support but found none.";
551         try
552         {
553             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
554             BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
555             return retVal;
556         }
557         catch(const armnn::InvalidArgumentException& e)
558         {
559             boost::ignore_unused(e);
560             // This is ok since we throw InvalidArgumentException when creating the dummy workload.
561             return true;
562         }
563         catch(const std::exception& e)
564         {
565             errorMsg = e.what();
566             BOOST_TEST_ERROR(layerName << ": " << errorMsg);
567             return false;
568         }
569         catch(...)
570         {
571             errorMsg = "Unexpected error while testing support for ";
572             BOOST_TEST_ERROR(errorMsg << layerName);
573             return false;
574         }
575     }
576     else
577     {
578         std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
579         try
580         {
581             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
582             BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
583             return retVal;
584         }
585         // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
586         // using parameters that make IsLayerSupported() return false should throw an
587         // InvalidArgumentException or UnimplementedException.
588         catch(const armnn::InvalidArgumentException& e)
589         {
590             boost::ignore_unused(e);
591             return true;
592         }
593         catch(const armnn::UnimplementedException& e)
594         {
595             boost::ignore_unused(e);
596             return true;
597         }
598         catch(const std::exception& e)
599         {
600             errorMsg = e.what();
601             BOOST_TEST_ERROR(layerName << ": " << errorMsg);
602             return false;
603         }
604         catch(...)
605         {
606             errorMsg = "Unexpected error while testing support for ";
607             BOOST_TEST_ERROR(errorMsg << layerName);
608             return false;
609         }
610     }
611 }
612
// Helper function to compute the next type in the LayerType enum.
// Relies on the enum values being contiguous between FirstLayer and LastLayer.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}
618
// Termination function for determining the end of the LayerType enumeration.
// Selected by overload resolution when the tag is Tag<LastLayer> (Type must be
// supplied explicitly); it still tests that final entry before stopping.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}

// Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
// NOTE(review): the && short-circuits, so once one layer type fails the
// remaining enum entries are not exercised in that run.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
    IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
        (factory, Tag<NextType(Type)>());
}

// Helper function to pass through to the test framework.
// Starts the compile-time recursion at the first LayerType enum entry.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
643
644 template<armnn::LayerType Type>
645 bool TestLayerTypeMatches()
646 {
647     using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
648     using LayerType = typename LayerPolicy::Type;
649     using LayerDesc = typename LayerPolicy::Desc;
650     DummyLayer<LayerType, LayerDesc> layer;
651
652     std::stringstream ss;
653     ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
654     bool v = Type == layer.m_Layer->GetType();
655     BOOST_CHECK_MESSAGE(v, ss.str());
656     return v;
657 }
658
// Terminates the compile-time recursion; still tests the final enum entry.
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

// Tests the current enum entry, then recurses on the next one
// (short-circuits on the first mismatch).
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
        LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}
671
672 template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
673 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
674 {
675     armnn::Graph graph;
676     LayerType* const layer = graph.AddLayer<LayerType>("LayerName");
677
678     armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
679     armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
680
681     armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
682     armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);
683
684     input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
685     input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
686     layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
687     layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
688
689     bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
690
691     return result;
692 }
693
694 template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
695 bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
696 {
697     armnn::Graph graph;
698     static const std::vector<unsigned> axes = {1, 0};
699     armnn::MeanDescriptor desc(axes, false);
700
701     armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");
702
703     armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
704     armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
705
706     armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
707     armnn::TensorInfo outputTensorInfo({2}, OutputDataType);
708
709     input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
710     input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
711     layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
712     layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
713
714     bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
715
716     return result;
717 }
718
719 // Tests that IsMeanSupported fails when input tensor dimensions
720 // do not match output tensor dimensions when keepDims == true
721 template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
722 bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
723 {
724     armnn::Graph graph;
725     static const std::vector<unsigned> axes = {};
726     // Set keepDims == true
727     armnn::MeanDescriptor desc(axes, true);
728
729     armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");
730
731     armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
732     armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
733
734     // Mismatching number of tensor dimensions
735     armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
736     armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);
737
738     input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
739     input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
740     layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
741     layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
742
743     bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
744
745     return result;
746 }
747
748
749 } //namespace