IVGCVSW-2093 Add SpaceToBatchNd layer and corresponding no-op factory implementations
[platform/upstream/armnn.git] / src / backends / backendsCommon / test / IsLayerSupportedTestImpl.hpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6
7 #include <Graph.hpp>
8
9 #include <backendsCommon/WorkloadFactory.hpp>
10
11 #include <boost/core/ignore_unused.hpp>
12
13 namespace
14 {
// Graph shared by every DummyLayer below: each DummyLayer constructor adds a
// layer here and its destructor erases it again, so the graph stays empty
// between tests.
armnn::Graph dummyGraph;
16
17 // Make a dummy TensorInfo object.
18 template<armnn::DataType DataType>
19 armnn::TensorInfo MakeDummyTensorInfo()
20 {
21     return armnn::TensorInfo({2,2,2,2}, DataType);
22 }
23
24
25 // Make a dummy WorkloadInfo using a dummy TensorInfo.
26 template<armnn::DataType DataType>
27 armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
28 {
29     armnn::WorkloadInfo info;
30     for (unsigned int i=0; i < numInputs; i++)
31     {
32         info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
33     }
34     for (unsigned int o=0; o < numOutputs; o++)
35     {
36         info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
37     }
38     return info;
39 }
40
41 // Template class to create a dummy layer (2 parameters).
42 template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
43 struct DummyLayer
44 {
45     DummyLayer()
46     {
47         m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
48     }
49     ~DummyLayer()
50     {
51         dummyGraph.EraseLayer(m_Layer);
52     }
53     LayerType* m_Layer;
54 };
55
56 // Template class to create a dummy layer (1 parameter).
57 template<typename LayerType>
58 struct DummyLayer<LayerType, void>
59 {
60     DummyLayer()
61     {
62         m_Layer = dummyGraph.AddLayer<LayerType>("");
63     }
64     ~DummyLayer()
65     {
66         dummyGraph.EraseLayer(m_Layer);
67     }
68     LayerType* m_Layer;
69 };
70
71 template<>
72 struct DummyLayer<armnn::BatchNormalizationLayer>
73 {
74     DummyLayer()
75     {
76         m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
77         m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
78             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
79         m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
80             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
81         m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
82             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
83         m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
84             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
85     }
86     ~DummyLayer()
87     {
88         dummyGraph.EraseLayer(m_Layer);
89     }
90     armnn::BatchNormalizationLayer* m_Layer;
91
92 };
93
94 template<>
95 struct DummyLayer<armnn::ConstantLayer, void>
96 {
97     DummyLayer()
98     {
99         m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
100     }
101     ~DummyLayer()
102     {
103         dummyGraph.EraseLayer(m_Layer);
104     }
105     armnn::ConstantLayer* m_Layer;
106 };
107
108 template<>
109 struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
110 {
111     DummyLayer()
112     {
113         m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
114
115     }
116     ~DummyLayer()
117     {
118         dummyGraph.EraseLayer(m_Layer);
119     }
120     armnn::InputLayer* m_Layer;
121 };
122
123 template<>
124 struct DummyLayer<armnn::MergerLayer>
125 {
126     DummyLayer()
127     {
128         armnn::OriginsDescriptor desc(2);
129         m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
130
131     }
132     ~DummyLayer()
133     {
134         dummyGraph.EraseLayer(m_Layer);
135     }
136     armnn::MergerLayer* m_Layer;
137 };
138
139 template<>
140 struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
141 {
142     DummyLayer()
143     {
144         m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
145
146     }
147     ~DummyLayer()
148     {
149         dummyGraph.EraseLayer(m_Layer);
150     }
151     armnn::OutputLayer* m_Layer;
152 };
153
154 template<>
155 struct DummyLayer<armnn::SplitterLayer>
156 {
157     DummyLayer()
158     {
159         armnn::ViewsDescriptor desc(1);
160         m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
161
162     }
163     ~DummyLayer()
164     {
165         dummyGraph.EraseLayer(m_Layer);
166     }
167     armnn::SplitterLayer* m_Layer;
168 };
169
170 template <typename ConvolutionLayerType>
171 struct DummyConvolutionLayer
172 {
173     DummyConvolutionLayer()
174     {
175         typename ConvolutionLayerType::DescriptorType desc;
176         m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
177         m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
178             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
179         m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
180             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
181     }
182     ~DummyConvolutionLayer()
183     {
184         dummyGraph.EraseLayer(m_Layer);
185     }
186     ConvolutionLayerType* m_Layer;
187 };
188
// Convolution2d reuses the common convolution dummy implementation above.
template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{
};
194
// DepthwiseConvolution2d reuses the common convolution dummy implementation above.
template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolution2dLayer<armnn::DepthwiseConvolution2dLayer>
{
};
200
201 template <typename LstmLayerType>
202 struct DummyLstmLayer
203 {
204     DummyLstmLayer()
205     {
206         typename LstmLayerType::DescriptorType desc;
207         desc.m_CifgEnabled = false;
208
209         m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
210         m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedCpuTensorHandle>(
211                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
212         m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedCpuTensorHandle>(
213                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
214         m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedCpuTensorHandle>(
215                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
216         m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
217                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
218         m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedCpuTensorHandle>(
219                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
220         m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
221                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
222         m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedCpuTensorHandle>(
223                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
224         m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedCpuTensorHandle>(
225                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
226         m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedCpuTensorHandle>(
227                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
228
229         m_Layer->m_CifgParameters.m_InputToInputWeights        = std::make_unique<armnn::ScopedCpuTensorHandle>(
230                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
231         m_Layer->m_CifgParameters.m_RecurrentToInputWeights    = std::make_unique<armnn::ScopedCpuTensorHandle>(
232                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
233         m_Layer->m_CifgParameters.m_CellToInputWeights         = std::make_unique<armnn::ScopedCpuTensorHandle>(
234                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
235         m_Layer->m_CifgParameters.m_InputGateBias              = std::make_unique<armnn::ScopedCpuTensorHandle>(
236                 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
237     }
238     ~DummyLstmLayer()
239     {
240         dummyGraph.EraseLayer(m_Layer);
241     }
242     armnn::LstmLayer* m_Layer;
243 };
244
// Lstm reuses the common LSTM dummy implementation above.
template<>
struct DummyLayer<armnn::LstmLayer>
        : public DummyLstmLayer<armnn::LstmLayer>
{
};
250
251 template<>
252 struct DummyLayer<armnn::FullyConnectedLayer>
253 {
254     DummyLayer()
255     {
256         armnn::FullyConnectedLayer::DescriptorType desc;
257         m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
258         m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
259             armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
260     }
261     ~DummyLayer()
262     {
263         dummyGraph.EraseLayer(m_Layer);
264     }
265     armnn::FullyConnectedLayer* m_Layer;
266 };
267
// Tag for giving LayerType entries a unique strong type each, so overload
// resolution can dispatch on a specific enum value (tag dispatch).
template<armnn::LayerType>
struct Tag{};
271
// Defines a LayerTypePolicy specialization for the given layer type.
// 'descType' is the second DummyLayer template argument ('void' for layers
// constructed from a name alone). The policy exposes the layer class, its
// descriptor and queue-descriptor types, a printable name, and a helper that
// creates a dummy workload through the given IWorkloadFactory.
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};
289
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter(name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters(descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

// Layer policy template. Declared but deliberately left undefined: every
// LayerType must be covered by a DECLARE_LAYER_POLICY_* invocation below, and
// any missing entry fails to compile when the recursive test reaches it.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
301
// Every entry in the armnn::LayerType enum must be accounted for below:
// the recursive IsLayerSupportedTestsImpl walks the whole enum range from
// FirstLayer to LastLayer, instantiating LayerTypePolicy for each value.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_2_PARAM(Merger)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
362
363
// Generic implementation to get the number of input slots for a given layer type.
// Specializations below override this for layers whose dummy instance does not
// report the slot count the test should exercise.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}
370
// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}
377
// Merger always tests with two inputs, matching the OriginsDescriptor(2) used
// by DummyLayer<armnn::MergerLayer>.
template<>
unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
{
    boost::ignore_unused(layer);
    return 2;
}
384
385 // Tests that the IsLayerSupported() function returns the correct value.
386 // We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
387 // Returns true if expectations are met, otherwise returns false.
388 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
389 bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
390 {
391     using LayerPolicy = LayerTypePolicy<Type, DataType>;
392     using LayerType = typename LayerPolicy::Type;
393     using LayerDesc = typename LayerPolicy::Desc;
394     DummyLayer<LayerType, LayerDesc> layer;
395
396     unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
397     unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
398
399     // Make another dummy layer just to make IsLayerSupported have valid inputs.
400     DummyLayer<armnn::ConstantLayer, void> previousLayer;
401     // Set output of the previous layer to a dummy tensor.
402     armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
403     previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
404     // Connect all outputs of the previous layer to inputs of tested layer.
405     for (unsigned int i = 0; i < numIn; i++)
406     {
407         armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
408         armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
409         previousLayerOutputSlot.Connect(layerInputSlot);
410     }
411     // Set outputs of tested layer to a dummy tensor.
412     for (unsigned int i = 0; i < numOut; i++)
413     {
414         layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
415     }
416
417     std::string layerName = LayerPolicy::NameStr;
418     std::string reasonIfUnsupported;
419     if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
420     {
421         std::string errorMsg = " layer expected support but found none.";
422         try
423         {
424             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
425             BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
426             return retVal;
427         }
428         catch(const armnn::InvalidArgumentException& e)
429         {
430             boost::ignore_unused(e);
431             // This is ok since we throw InvalidArgumentException when creating the dummy workload.
432             return true;
433         }
434         catch(const std::exception& e)
435         {
436             errorMsg = e.what();
437             BOOST_TEST_ERROR(layerName << ": " << errorMsg);
438             return false;
439         }
440         catch(...)
441         {
442             errorMsg = "Unexpected error while testing support for ";
443             BOOST_TEST_ERROR(errorMsg << layerName);
444             return false;
445         }
446     }
447     else
448     {
449         std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
450         try
451         {
452             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
453             BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
454             return retVal;
455         }
456         // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
457         // using parameters that make IsLayerSupported() return false should throw an
458         // InvalidArgumentException or UnimplementedException.
459         catch(const armnn::InvalidArgumentException& e)
460         {
461             boost::ignore_unused(e);
462             return true;
463         }
464         catch(const armnn::UnimplementedException& e)
465         {
466             boost::ignore_unused(e);
467             return true;
468         }
469         catch(const std::exception& e)
470         {
471             errorMsg = e.what();
472             BOOST_TEST_ERROR(layerName << ": " << errorMsg);
473             return false;
474         }
475         catch(...)
476         {
477             errorMsg = "Unexpected error while testing support for ";
478             BOOST_TEST_ERROR(errorMsg << layerName);
479             return false;
480         }
481     }
482 }
483
484 // Helper function to compute the next type in the LayerType enum.
485 constexpr armnn::LayerType NextType(armnn::LayerType type)
486 {
487     return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
488 }
489
// Termination function for determining the end of the LayerType enumeration.
// This overload is selected (via the LastLayer tag) when the recursion reaches
// the final enum entry; it tests that entry and stops.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
};
496
497 // Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
498 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
499 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
500 {
501     bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
502
503     return v &&
504     IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
505         (factory, Tag<NextType(Type)>());
506 };
507
// Helper function to pass through to the test framework: kicks off the
// recursion at FirstLayer and walks every LayerType entry.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
};
514
515 template<armnn::LayerType Type>
516 bool TestLayerTypeMatches()
517 {
518     using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
519     using LayerType = typename LayerPolicy::Type;
520     using LayerDesc = typename LayerPolicy::Desc;
521     DummyLayer<LayerType, LayerDesc> layer;
522
523     std::stringstream ss;
524     ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
525     bool v = Type == layer.m_Layer->GetType();
526     BOOST_CHECK_MESSAGE(v, ss.str());
527     return v;
528 };
529
// Termination overload: reached via the LastLayer tag, checks the final enum
// entry and ends the recursion.
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
};
535
536 template<armnn::LayerType Type>
537 bool LayerTypeMatchesTestImpl(Tag<Type>)
538 {
539     return TestLayerTypeMatches<Type>() &&
540         LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
541 };
542
543 template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
544 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
545 {
546     armnn::Graph graph;
547     LayerType* const layer = graph.AddLayer<LayerType>("LayerName");
548
549     armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
550     armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
551
552     armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
553     armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);
554
555     input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
556     input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
557     layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
558     layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
559
560     bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
561
562     return result;
563 };
564
565 } //namespace