IVGCVSW-2093 Add SpaceToBatchNd layer and corresponding no-op factory implementations
[platform/upstream/armnn.git] / src / backends / test / IsLayerSupportedTestImpl.hpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6
7 #include "Graph.hpp"
8
9 #include <boost/core/ignore_unused.hpp>
10
11 namespace
12 {
// Shared scratch graph used by every DummyLayer below: each DummyLayer
// constructor adds a layer to it and the matching destructor erases it again.
armnn::Graph dummyGraph;
14
15 // Make a dummy TensorInfo object.
16 template<armnn::DataType DataType>
17 armnn::TensorInfo MakeDummyTensorInfo()
18 {
19     return armnn::TensorInfo({2,2,2,2}, DataType);
20 }
21
22
23 // Make a dummy WorkloadInfo using a dummy TensorInfo.
24 template<armnn::DataType DataType>
25 armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
26 {
27     armnn::WorkloadInfo info;
28     for (unsigned int i=0; i < numInputs; i++)
29     {
30         info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
31     }
32     for (unsigned int o=0; o < numOutputs; o++)
33     {
34         info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
35     }
36     return info;
37 }
38
39 // Template class to create a dummy layer (2 parameters).
40 template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
41 struct DummyLayer
42 {
43     DummyLayer()
44     {
45         m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
46     }
47     ~DummyLayer()
48     {
49         dummyGraph.EraseLayer(m_Layer);
50     }
51     LayerType* m_Layer;
52 };
53
54 // Template class to create a dummy layer (1 parameter).
55 template<typename LayerType>
56 struct DummyLayer<LayerType, void>
57 {
58     DummyLayer()
59     {
60         m_Layer = dummyGraph.AddLayer<LayerType>("");
61     }
62     ~DummyLayer()
63     {
64         dummyGraph.EraseLayer(m_Layer);
65     }
66     LayerType* m_Layer;
67 };
68
69 template<>
70 struct DummyLayer<armnn::BatchNormalizationLayer>
71 {
72     DummyLayer()
73     {
74         m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
75         m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
76             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
77         m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
78             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
79         m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
80             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
81         m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
82             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
83     }
84     ~DummyLayer()
85     {
86         dummyGraph.EraseLayer(m_Layer);
87     }
88     armnn::BatchNormalizationLayer* m_Layer;
89
90 };
91
92 template<>
93 struct DummyLayer<armnn::ConstantLayer, void>
94 {
95     DummyLayer()
96     {
97         m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
98     }
99     ~DummyLayer()
100     {
101         dummyGraph.EraseLayer(m_Layer);
102     }
103     armnn::ConstantLayer* m_Layer;
104 };
105
106 template<>
107 struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
108 {
109     DummyLayer()
110     {
111         m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
112
113     }
114     ~DummyLayer()
115     {
116         dummyGraph.EraseLayer(m_Layer);
117     }
118     armnn::InputLayer* m_Layer;
119 };
120
121 template<>
122 struct DummyLayer<armnn::MergerLayer>
123 {
124     DummyLayer()
125     {
126         armnn::OriginsDescriptor desc(2);
127         m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
128
129     }
130     ~DummyLayer()
131     {
132         dummyGraph.EraseLayer(m_Layer);
133     }
134     armnn::MergerLayer* m_Layer;
135 };
136
137 template<>
138 struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
139 {
140     DummyLayer()
141     {
142         m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
143
144     }
145     ~DummyLayer()
146     {
147         dummyGraph.EraseLayer(m_Layer);
148     }
149     armnn::OutputLayer* m_Layer;
150 };
151
152 template<>
153 struct DummyLayer<armnn::SplitterLayer>
154 {
155     DummyLayer()
156     {
157         armnn::ViewsDescriptor desc(1);
158         m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
159
160     }
161     ~DummyLayer()
162     {
163         dummyGraph.EraseLayer(m_Layer);
164     }
165     armnn::SplitterLayer* m_Layer;
166 };
167
168 template <typename ConvolutionLayerType>
169 struct DummyConvolutionLayer
170 {
171     DummyConvolutionLayer()
172     {
173         typename ConvolutionLayerType::DescriptorType desc;
174         m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
175         m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
176             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
177         m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
178             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
179     }
180     ~DummyConvolutionLayer()
181     {
182         dummyGraph.EraseLayer(m_Layer);
183     }
184     ConvolutionLayerType* m_Layer;
185 };
186
// Convolution2d reuses the common convolution dummy, which also populates the
// weight and bias tensors.
template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};

// DepthwiseConvolution2d likewise reuses DummyConvolutionLayer.
template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{
};
198
199 template <typename LstmLayerType>
200 struct DummyLstmLayer
201 {
202     DummyLstmLayer()
203     {
204         typename LstmLayerType::DescriptorType desc;
205         desc.m_CifgEnabled = false;
206
207         m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
208         m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedCpuTensorHandle>(
209                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
210         m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedCpuTensorHandle>(
211                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
212         m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedCpuTensorHandle>(
213                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
214         m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
215                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
216         m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedCpuTensorHandle>(
217                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
218         m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
219                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
220         m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedCpuTensorHandle>(
221                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
222         m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedCpuTensorHandle>(
223                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
224         m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedCpuTensorHandle>(
225                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
226
227         m_Layer->m_CifgParameters.m_InputToInputWeights        = std::make_unique<armnn::ScopedCpuTensorHandle>(
228                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
229         m_Layer->m_CifgParameters.m_RecurrentToInputWeights    = std::make_unique<armnn::ScopedCpuTensorHandle>(
230                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
231         m_Layer->m_CifgParameters.m_CellToInputWeights         = std::make_unique<armnn::ScopedCpuTensorHandle>(
232                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
233         m_Layer->m_CifgParameters.m_InputGateBias              = std::make_unique<armnn::ScopedCpuTensorHandle>(
234                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
235     }
236     ~DummyLstmLayer()
237     {
238         dummyGraph.EraseLayer(m_Layer);
239     }
240     armnn::LstmLayer* m_Layer;
241 };
242
// Lstm uses the dedicated DummyLstmLayer, which populates the weight/bias
// tensors the layer requires.
template<>
struct DummyLayer<armnn::LstmLayer>
        : public DummyLstmLayer<armnn::LstmLayer>
{
};
248
249 template<>
250 struct DummyLayer<armnn::FullyConnectedLayer>
251 {
252     DummyLayer()
253     {
254         armnn::FullyConnectedLayer::DescriptorType desc;
255         m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
256         m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
257             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
258     }
259     ~DummyLayer()
260     {
261         dummyGraph.EraseLayer(m_Layer);
262     }
263     armnn::FullyConnectedLayer* m_Layer;
264 };
265
// Tag for giving LayerType entries a unique strong type each.
// Used for tag dispatch in the recursive *TestsImpl functions below.
template<armnn::LayerType>
struct Tag{};

// Defines a LayerTypePolicy specialization for the given layer. The policy
// bundles the layer class, its descriptor type (descType, or void for none),
// the queue descriptor, the layer's name string, and a factory method that
// creates a dummy workload via IWorkloadFactory::Create<name>().
// (No comments inside the macro body: '//' would swallow the continuation
// backslashes.)
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter(name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters(descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

// Layer policy template. The primary template is declared but never defined:
// only the per-layer specializations generated by the macros above exist, so
// an unaccounted-for LayerType fails to compile.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
299
// Every entry in the armnn::LayerType enum must be accounted for below.
// NOTE(review): the ordering here (e.g. MemCopy between Convolution2d and
// DepthwiseConvolution2d) presumably mirrors the LayerType enum order —
// verify against Types.hpp when adding new entries.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_2_PARAM(Merger)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
360
361
// Generic implementation to get the number of input slots for a given layer type;
// simply queries the layer instance.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type;
// simply queries the layer instance.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}
375
// Merger is created with an OriginsDescriptor of 2 origins (see
// DummyLayer<MergerLayer>), so it is hard-coded to report two inputs here.
template<>
unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
{
    boost::ignore_unused(layer);
    return 2;
}
382
383 // Tests that the IsLayerSupported() function returns the correct value.
384 // We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
385 // Returns true if expectations are met, otherwise returns false.
386 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
387 bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
388 {
389     using LayerPolicy = LayerTypePolicy<Type, DataType>;
390     using LayerType = typename LayerPolicy::Type;
391     using LayerDesc = typename LayerPolicy::Desc;
392     DummyLayer<LayerType, LayerDesc> layer;
393
394     unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
395     unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
396
397     // Make another dummy layer just to make IsLayerSupported have valid inputs.
398     DummyLayer<armnn::ConstantLayer, void> previousLayer;
399     // Set output of the previous layer to a dummy tensor.
400     armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
401     previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
402     // Connect all outputs of the previous layer to inputs of tested layer.
403     for (unsigned int i = 0; i < numIn; i++)
404     {
405         armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
406         armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
407         previousLayerOutputSlot.Connect(layerInputSlot);
408     }
409     // Set outputs of tested layer to a dummy tensor.
410     for (unsigned int i = 0; i < numOut; i++)
411     {
412         layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
413     }
414
415     std::string layerName = LayerPolicy::NameStr;
416     std::string reasonIfUnsupported;
417     if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
418     {
419         std::string errorMsg = " layer expected support but found none.";
420         try
421         {
422             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
423             BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
424             return retVal;
425         }
426         catch(const armnn::InvalidArgumentException& e)
427         {
428             boost::ignore_unused(e);
429             // This is ok since we throw InvalidArgumentException when creating the dummy workload.
430             return true;
431         }
432         catch(const std::exception& e)
433         {
434             errorMsg = e.what();
435             BOOST_TEST_ERROR(layerName << ": " << errorMsg);
436             return false;
437         }
438         catch(...)
439         {
440             errorMsg = "Unexpected error while testing support for ";
441             BOOST_TEST_ERROR(errorMsg << layerName);
442             return false;
443         }
444     }
445     else
446     {
447         std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
448         try
449         {
450             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
451             BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
452             return retVal;
453         }
454         // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
455         // using parameters that make IsLayerSupported() return false should throw an
456         // InvalidArgumentException or UnimplementedException.
457         catch(const armnn::InvalidArgumentException& e)
458         {
459             boost::ignore_unused(e);
460             return true;
461         }
462         catch(const armnn::UnimplementedException& e)
463         {
464             boost::ignore_unused(e);
465             return true;
466         }
467         catch(const std::exception& e)
468         {
469             errorMsg = e.what();
470             BOOST_TEST_ERROR(layerName << ": " << errorMsg);
471             return false;
472         }
473         catch(...)
474         {
475             errorMsg = "Unexpected error while testing support for ";
476             BOOST_TEST_ERROR(errorMsg << layerName);
477             return false;
478         }
479     }
480 }
481
// Helper function to compute the next type in the LayerType enum.
// Assumes the enum values are contiguous integers — required by the
// recursion in the *TestsImpl functions below.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}
487
// Termination function for determining the end of the LayerType enumeration.
// Selected by overload resolution when the tag is Tag<LastLayer>: runs the
// test for the final enum entry and stops the recursion.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
};
494
495 // Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
496 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
497 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
498 {
499     bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
500
501     return v &&
502     IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
503         (factory, Tag<NextType(Type)>());
504 };
505
// Helper function to pass through to the test framework: starts the
// recursion over every LayerType enum entry from FirstLayer.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
};
512
513 template<armnn::LayerType Type>
514 bool TestLayerTypeMatches()
515 {
516     using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
517     using LayerType = typename LayerPolicy::Type;
518     using LayerDesc = typename LayerPolicy::Desc;
519     DummyLayer<LayerType, LayerDesc> layer;
520
521     std::stringstream ss;
522     ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
523     bool v = Type == layer.m_Layer->GetType();
524     BOOST_CHECK_MESSAGE(v, ss.str());
525     return v;
526 };
527
// Termination overload: checks the final LayerType enum entry.
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
};

// Recursive overload: checks Type, then advances to the next enum entry
// (short-circuits on the first mismatch).
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
        LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
};
540
541 template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
542 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
543 {
544     armnn::Graph graph;
545     LayerType* const layer = graph.AddLayer<LayerType>("LayerName");
546
547     armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
548     armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
549
550     armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
551     armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);
552
553     input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
554     input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
555     layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
556     layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
557
558     bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
559
560     return result;
561 };
562
563 } //namespace