2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
9 #include <boost/core/ignore_unused.hpp>
// Shared graph that every DummyLayer below adds its layer to (and erases it from on destruction).
armnn::Graph dummyGraph;
15 // Make a dummy TensorInfo object.
16 template<armnn::DataType DataType>
17 armnn::TensorInfo MakeDummyTensorInfo()
19 return armnn::TensorInfo({2,2,2,2}, DataType);
23 // Make a dummy WorkloadInfo using a dummy TensorInfo.
24 template<armnn::DataType DataType>
25 armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
27 armnn::WorkloadInfo info;
28 for (unsigned int i=0; i < numInputs; i++)
30 info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
32 for (unsigned int o=0; o < numOutputs; o++)
34 info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
39 // Template class to create a dummy layer (2 parameters).
40 template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
45 m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
49 dummyGraph.EraseLayer(m_Layer);
54 // Template class to create a dummy layer (1 parameter).
55 template<typename LayerType>
56 struct DummyLayer<LayerType, void>
60 m_Layer = dummyGraph.AddLayer<LayerType>("");
64 dummyGraph.EraseLayer(m_Layer);
70 struct DummyLayer<armnn::BatchNormalizationLayer>
74 m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
75 m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
76 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
77 m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
78 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
79 m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
80 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
81 m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
82 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
86 dummyGraph.EraseLayer(m_Layer);
88 armnn::BatchNormalizationLayer* m_Layer;
93 struct DummyLayer<armnn::ConstantLayer, void>
97 m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
101 dummyGraph.EraseLayer(m_Layer);
103 armnn::ConstantLayer* m_Layer;
107 struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
111 m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
116 dummyGraph.EraseLayer(m_Layer);
118 armnn::InputLayer* m_Layer;
122 struct DummyLayer<armnn::MergerLayer>
126 armnn::OriginsDescriptor desc(2);
127 m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
132 dummyGraph.EraseLayer(m_Layer);
134 armnn::MergerLayer* m_Layer;
138 struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
142 m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
147 dummyGraph.EraseLayer(m_Layer);
149 armnn::OutputLayer* m_Layer;
153 struct DummyLayer<armnn::SplitterLayer>
157 armnn::ViewsDescriptor desc(1);
158 m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
163 dummyGraph.EraseLayer(m_Layer);
165 armnn::SplitterLayer* m_Layer;
168 template <typename ConvolutionLayerType>
169 struct DummyConvolutionLayer
171 DummyConvolutionLayer()
173 typename ConvolutionLayerType::DescriptorType desc;
174 m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
175 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
176 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
177 m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
178 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
180 ~DummyConvolutionLayer()
182 dummyGraph.EraseLayer(m_Layer);
184 ConvolutionLayerType* m_Layer;
188 struct DummyLayer<armnn::Convolution2dLayer>
189 : public DummyConvolutionLayer<armnn::Convolution2dLayer>
194 struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
195 : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
199 template <typename LstmLayerType>
200 struct DummyLstmLayer
204 typename LstmLayerType::DescriptorType desc;
205 desc.m_CifgEnabled = false;
207 m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
208 m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
209 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
210 m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
211 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
212 m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
213 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
214 m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
215 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
216 m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
217 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
218 m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
219 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
220 m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
221 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
222 m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
223 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
224 m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
225 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
227 m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
228 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
229 m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
230 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
231 m_Layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
232 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
233 m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
234 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
238 dummyGraph.EraseLayer(m_Layer);
240 armnn::LstmLayer* m_Layer;
244 struct DummyLayer<armnn::LstmLayer>
245 : public DummyLstmLayer<armnn::LstmLayer>
250 struct DummyLayer<armnn::FullyConnectedLayer>
254 armnn::FullyConnectedLayer::DescriptorType desc;
255 m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
256 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
257 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
261 dummyGraph.EraseLayer(m_Layer);
263 armnn::FullyConnectedLayer* m_Layer;
266 // Tag for giving LayerType entries a unique strong type each.
267 template<armnn::LayerType>
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes a custom parameter type (descType).
// Bug fix: `desc` was used in factory->Create##name(desc, info) without ever being
// declared; the QueueDesc local is restored before the Create call.
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        QueueDesc desc; \
        return factory->Create##name(desc, info); \
    } \
};
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
// Layer policy template.
// Declared but never defined: every LayerType enum entry must get an explicit
// specialization via one of the DECLARE_LAYER_POLICY_* macros.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
// Every entry in the armnn::LayerType enum must be accounted for below.
// Input and Output use CUSTOM_PARAM because they are constructed from a
// LayerBindingId rather than a descriptor.
DECLARE_LAYER_POLICY_2_PARAM(Activation)
DECLARE_LAYER_POLICY_1_PARAM(Addition)
DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
DECLARE_LAYER_POLICY_1_PARAM(Constant)
DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)
DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)
DECLARE_LAYER_POLICY_1_PARAM(MemCopy)
DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)
DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)
DECLARE_LAYER_POLICY_1_PARAM(Floor)
DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
DECLARE_LAYER_POLICY_2_PARAM(Lstm)
DECLARE_LAYER_POLICY_2_PARAM(Mean)
DECLARE_LAYER_POLICY_2_PARAM(Merger)
DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
DECLARE_LAYER_POLICY_2_PARAM(Normalization)
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)
DECLARE_LAYER_POLICY_2_PARAM(Pad)
DECLARE_LAYER_POLICY_2_PARAM(Permute)
DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)
DECLARE_LAYER_POLICY_1_PARAM(Division)
DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)
DECLARE_LAYER_POLICY_2_PARAM(Reshape)
DECLARE_LAYER_POLICY_2_PARAM(Softmax)
DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)
DECLARE_LAYER_POLICY_2_PARAM(Splitter)
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
362 // Generic implementation to get the number of input slots for a given layer type;
363 template<armnn::LayerType Type>
364 unsigned int GetNumInputs(const armnn::Layer& layer)
366 return layer.GetNumInputSlots();
369 // Generic implementation to get the number of output slots for a given layer type;
370 template<armnn::LayerType Type>
371 unsigned int GetNumOutputs(const armnn::Layer& layer)
373 return layer.GetNumOutputSlots();
377 unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
379 boost::ignore_unused(layer);
383 // Tests that the IsLayerSupported() function returns the correct value.
384 // We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
385 // Returns true if expectations are met, otherwise returns false.
386 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
387 bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
389 using LayerPolicy = LayerTypePolicy<Type, DataType>;
390 using LayerType = typename LayerPolicy::Type;
391 using LayerDesc = typename LayerPolicy::Desc;
392 DummyLayer<LayerType, LayerDesc> layer;
394 unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
395 unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
397 // Make another dummy layer just to make IsLayerSupported have valid inputs.
398 DummyLayer<armnn::ConstantLayer, void> previousLayer;
399 // Set output of the previous layer to a dummy tensor.
400 armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
401 previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
402 // Connect all outputs of the previous layer to inputs of tested layer.
403 for (unsigned int i = 0; i < numIn; i++)
405 armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
406 armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
407 previousLayerOutputSlot.Connect(layerInputSlot);
409 // Set outputs of tested layer to a dummy tensor.
410 for (unsigned int i = 0; i < numOut; i++)
412 layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
415 std::string layerName = LayerPolicy::NameStr;
416 std::string reasonIfUnsupported;
417 if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
419 std::string errorMsg = " layer expected support but found none.";
422 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
423 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
426 catch(const armnn::InvalidArgumentException& e)
428 boost::ignore_unused(e);
429 // This is ok since we throw InvalidArgumentException when creating the dummy workload.
432 catch(const std::exception& e)
435 BOOST_TEST_ERROR(layerName << ": " << errorMsg);
440 errorMsg = "Unexpected error while testing support for ";
441 BOOST_TEST_ERROR(errorMsg << layerName);
447 std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
450 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
451 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
454 // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
455 // using parameters that make IsLayerSupported() return false should throw an
456 // InvalidArgumentException or UnimplementedException.
457 catch(const armnn::InvalidArgumentException& e)
459 boost::ignore_unused(e);
462 catch(const armnn::UnimplementedException& e)
464 boost::ignore_unused(e);
467 catch(const std::exception& e)
470 BOOST_TEST_ERROR(layerName << ": " << errorMsg);
475 errorMsg = "Unexpected error while testing support for ";
476 BOOST_TEST_ERROR(errorMsg << layerName);
482 // Helper function to compute the next type in the LayerType enum.
483 constexpr armnn::LayerType NextType(armnn::LayerType type)
485 return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
488 // Termination function for determining the end of the LayerType enumeration.
489 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
490 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
492 return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
495 // Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
496 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
497 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
499 bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
502 IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
503 (factory, Tag<NextType(Type)>());
506 // Helper function to pass through to the test framework.
507 template<typename FactoryType, armnn::DataType DataType>
508 bool IsLayerSupportedTests(FactoryType *factory)
510 return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
513 template<armnn::LayerType Type>
514 bool TestLayerTypeMatches()
516 using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
517 using LayerType = typename LayerPolicy::Type;
518 using LayerDesc = typename LayerPolicy::Desc;
519 DummyLayer<LayerType, LayerDesc> layer;
521 std::stringstream ss;
522 ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
523 bool v = Type == layer.m_Layer->GetType();
524 BOOST_CHECK_MESSAGE(v, ss.str());
528 template<armnn::LayerType Type>
529 bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
531 return TestLayerTypeMatches<Type>();
534 template<armnn::LayerType Type>
535 bool LayerTypeMatchesTestImpl(Tag<Type>)
537 return TestLayerTypeMatches<Type>() &&
538 LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
541 template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
542 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
545 LayerType* const layer = graph.AddLayer<LayerType>("LayerName");
547 armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
548 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
550 armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
551 armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);
553 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
554 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
555 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
556 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
558 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);