2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
9 #include <backendsCommon/WorkloadFactory.hpp>
11 #include <boost/core/ignore_unused.hpp>
// Shared scratch graph: every Dummy*Layer helper below adds its layer to this
// graph on construction and erases it again in its destructor.
armnn::Graph dummyGraph;
17 // Make a dummy TensorInfo object.
18 template<armnn::DataType DataType>
19 armnn::TensorInfo MakeDummyTensorInfo()
21 return armnn::TensorInfo({2,2,2,2}, DataType);
25 // Make a dummy WorkloadInfo using a dummy TensorInfo.
26 template<armnn::DataType DataType>
27 armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
29 armnn::WorkloadInfo info;
30 for (unsigned int i=0; i < numInputs; i++)
32 info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
34 for (unsigned int o=0; o < numOutputs; o++)
36 info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
41 // Template class to create a dummy layer (2 parameters).
42 template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
47 m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
51 dummyGraph.EraseLayer(m_Layer);
56 // Template class to create a dummy layer (1 parameter).
57 template<typename LayerType>
58 struct DummyLayer<LayerType, void>
62 m_Layer = dummyGraph.AddLayer<LayerType>("");
66 dummyGraph.EraseLayer(m_Layer);
72 struct DummyLayer<armnn::BatchNormalizationLayer>
76 m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
77 m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
78 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
79 m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
80 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
81 m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
82 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
83 m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
84 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
88 dummyGraph.EraseLayer(m_Layer);
90 armnn::BatchNormalizationLayer* m_Layer;
95 struct DummyLayer<armnn::ConstantLayer, void>
99 m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
103 dummyGraph.EraseLayer(m_Layer);
105 armnn::ConstantLayer* m_Layer;
109 struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
113 m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
118 dummyGraph.EraseLayer(m_Layer);
120 armnn::InputLayer* m_Layer;
124 struct DummyLayer<armnn::MergerLayer>
128 armnn::OriginsDescriptor desc(2);
129 m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
134 dummyGraph.EraseLayer(m_Layer);
136 armnn::MergerLayer* m_Layer;
140 struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
144 m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
149 dummyGraph.EraseLayer(m_Layer);
151 armnn::OutputLayer* m_Layer;
155 struct DummyLayer<armnn::SplitterLayer>
159 armnn::ViewsDescriptor desc(1);
160 m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
165 dummyGraph.EraseLayer(m_Layer);
167 armnn::SplitterLayer* m_Layer;
170 template <typename ConvolutionLayerType>
171 struct DummyConvolutionLayer
173 DummyConvolutionLayer()
175 typename ConvolutionLayerType::DescriptorType desc;
176 m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
177 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
178 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
179 m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
180 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
182 ~DummyConvolutionLayer()
184 dummyGraph.EraseLayer(m_Layer);
186 ConvolutionLayerType* m_Layer;
190 struct DummyLayer<armnn::Convolution2dLayer>
191 : public DummyConvolutionLayer<armnn::Convolution2dLayer>
196 struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
197 : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
201 template <typename LstmLayerType>
202 struct DummyLstmLayer
206 typename LstmLayerType::DescriptorType desc;
207 desc.m_CifgEnabled = false;
209 m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
210 m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
211 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
212 m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
213 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
214 m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
215 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
216 m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
217 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
218 m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
219 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
220 m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
221 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
222 m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
223 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
224 m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
225 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
226 m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
227 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
229 m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
230 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
231 m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
232 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
233 m_Layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
234 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
235 m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
236 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
240 dummyGraph.EraseLayer(m_Layer);
242 armnn::LstmLayer* m_Layer;
246 struct DummyLayer<armnn::LstmLayer>
247 : public DummyLstmLayer<armnn::LstmLayer>
252 struct DummyLayer<armnn::FullyConnectedLayer>
256 armnn::FullyConnectedLayer::DescriptorType desc;
257 m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
258 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
259 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
263 dummyGraph.EraseLayer(m_Layer);
265 armnn::FullyConnectedLayer* m_Layer;
268 // Tag for giving LayerType entries a unique strong type each.
269 template<armnn::LayerType>
// Generates a LayerTypePolicy specialization for one armnn::LayerType entry.
// descType selects the DummyLayer constructor-parameter type; MakeDummyWorkload
// asks the factory to create the corresponding workload with dummy tensors.
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter(name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters(descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
// Layer policy template. Deliberately left undefined: only the per-layer
// specializations generated by the DECLARE_LAYER_POLICY_* macros exist.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

// Input/Output take a LayerBindingId instead of a descriptor.
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_2_PARAM(Merger)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
364 // Generic implementation to get the number of input slots for a given layer type;
365 template<armnn::LayerType Type>
366 unsigned int GetNumInputs(const armnn::Layer& layer)
368 return layer.GetNumInputSlots();
371 // Generic implementation to get the number of output slots for a given layer type;
372 template<armnn::LayerType Type>
373 unsigned int GetNumOutputs(const armnn::Layer& layer)
375 return layer.GetNumOutputSlots();
379 unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
381 boost::ignore_unused(layer);
385 // Tests that the IsLayerSupported() function returns the correct value.
386 // We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
387 // Returns true if expectations are met, otherwise returns false.
388 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
389 bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
391 using LayerPolicy = LayerTypePolicy<Type, DataType>;
392 using LayerType = typename LayerPolicy::Type;
393 using LayerDesc = typename LayerPolicy::Desc;
394 DummyLayer<LayerType, LayerDesc> layer;
396 unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
397 unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
399 // Make another dummy layer just to make IsLayerSupported have valid inputs.
400 DummyLayer<armnn::ConstantLayer, void> previousLayer;
401 // Set output of the previous layer to a dummy tensor.
402 armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
403 previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
404 // Connect all outputs of the previous layer to inputs of tested layer.
405 for (unsigned int i = 0; i < numIn; i++)
407 armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
408 armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
409 previousLayerOutputSlot.Connect(layerInputSlot);
411 // Set outputs of tested layer to a dummy tensor.
412 for (unsigned int i = 0; i < numOut; i++)
414 layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
417 std::string layerName = LayerPolicy::NameStr;
418 std::string reasonIfUnsupported;
419 if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
421 std::string errorMsg = " layer expected support but found none.";
424 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
425 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
428 catch(const armnn::InvalidArgumentException& e)
430 boost::ignore_unused(e);
431 // This is ok since we throw InvalidArgumentException when creating the dummy workload.
434 catch(const std::exception& e)
437 BOOST_TEST_ERROR(layerName << ": " << errorMsg);
442 errorMsg = "Unexpected error while testing support for ";
443 BOOST_TEST_ERROR(errorMsg << layerName);
449 std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
452 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
453 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
456 // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
457 // using parameters that make IsLayerSupported() return false should throw an
458 // InvalidArgumentException or UnimplementedException.
459 catch(const armnn::InvalidArgumentException& e)
461 boost::ignore_unused(e);
464 catch(const armnn::UnimplementedException& e)
466 boost::ignore_unused(e);
469 catch(const std::exception& e)
472 BOOST_TEST_ERROR(layerName << ": " << errorMsg);
477 errorMsg = "Unexpected error while testing support for ";
478 BOOST_TEST_ERROR(errorMsg << layerName);
484 // Helper function to compute the next type in the LayerType enum.
485 constexpr armnn::LayerType NextType(armnn::LayerType type)
487 return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
490 // Termination function for determining the end of the LayerType enumeration.
491 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
492 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
494 return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
497 // Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
498 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
499 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
501 bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
504 IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
505 (factory, Tag<NextType(Type)>());
508 // Helper function to pass through to the test framework.
509 template<typename FactoryType, armnn::DataType DataType>
510 bool IsLayerSupportedTests(FactoryType *factory)
512 return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
515 template<armnn::LayerType Type>
516 bool TestLayerTypeMatches()
518 using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
519 using LayerType = typename LayerPolicy::Type;
520 using LayerDesc = typename LayerPolicy::Desc;
521 DummyLayer<LayerType, LayerDesc> layer;
523 std::stringstream ss;
524 ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
525 bool v = Type == layer.m_Layer->GetType();
526 BOOST_CHECK_MESSAGE(v, ss.str());
530 template<armnn::LayerType Type>
531 bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
533 return TestLayerTypeMatches<Type>();
536 template<armnn::LayerType Type>
537 bool LayerTypeMatchesTestImpl(Tag<Type>)
539 return TestLayerTypeMatches<Type>() &&
540 LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
543 template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
544 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
547 LayerType* const layer = graph.AddLayer<LayerType>("LayerName");
549 armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
550 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
552 armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
553 armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);
555 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
556 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
557 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
558 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
560 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);