2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
9 #include <backendsCommon/WorkloadFactory.hpp>
11 #include <boost/core/ignore_unused.hpp>
// Scratch graph shared by every DummyLayer helper below: layers are added in
// the helpers' constructors and erased again in their destructors.
armnn::Graph dummyGraph;
17 // Make a dummy TensorInfo object.
18 template<armnn::DataType DataType>
19 armnn::TensorInfo MakeDummyTensorInfo()
21 return armnn::TensorInfo({2,2,2,2}, DataType);
25 // Make a dummy WorkloadInfo using a dummy TensorInfo.
26 template<armnn::DataType DataType>
27 armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
29 armnn::WorkloadInfo info;
31 for (unsigned int i=0; i < numInputs; i++)
33 info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
36 for (unsigned int o=0; o < numOutputs; o++)
38 info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
44 // Template class to create a dummy layer (2 parameters).
45 template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
50 m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
55 dummyGraph.EraseLayer(m_Layer);
61 // Template class to create a dummy layer (1 parameter).
62 template<typename LayerType>
63 struct DummyLayer<LayerType, void>
67 m_Layer = dummyGraph.AddLayer<LayerType>("");
72 dummyGraph.EraseLayer(m_Layer);
79 struct DummyLayer<armnn::BatchNormalizationLayer>
83 m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
84 m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
85 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
86 m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
87 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
88 m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
89 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
90 m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
91 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
96 dummyGraph.EraseLayer(m_Layer);
99 armnn::BatchNormalizationLayer* m_Layer;
103 struct DummyLayer<armnn::BatchToSpaceNdLayer>
107 m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
112 dummyGraph.EraseLayer(m_Layer);
115 armnn::BatchToSpaceNdLayer* m_Layer;
119 struct DummyLayer<armnn::ConstantLayer, void>
123 m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
128 dummyGraph.EraseLayer(m_Layer);
131 armnn::ConstantLayer* m_Layer;
135 struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
139 m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
144 dummyGraph.EraseLayer(m_Layer);
147 armnn::InputLayer* m_Layer;
151 struct DummyLayer<armnn::ConcatLayer>
155 armnn::OriginsDescriptor desc(2);
156 m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
161 dummyGraph.EraseLayer(m_Layer);
164 armnn::ConcatLayer* m_Layer;
168 struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
172 m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
177 dummyGraph.EraseLayer(m_Layer);
180 armnn::OutputLayer* m_Layer;
184 struct DummyLayer<armnn::SplitterLayer>
188 armnn::ViewsDescriptor desc(1);
189 m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
194 dummyGraph.EraseLayer(m_Layer);
197 armnn::SplitterLayer* m_Layer;
200 template <typename ConvolutionLayerType>
201 struct DummyConvolutionLayer
203 DummyConvolutionLayer()
205 typename ConvolutionLayerType::DescriptorType desc;
206 m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
207 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
208 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
209 m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
210 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
213 ~DummyConvolutionLayer()
215 dummyGraph.EraseLayer(m_Layer);
218 ConvolutionLayerType* m_Layer;
222 struct DummyLayer<armnn::Convolution2dLayer>
223 : public DummyConvolutionLayer<armnn::Convolution2dLayer>
228 struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
229 : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
234 struct DummyLayer<armnn::TransposeConvolution2dLayer>
235 : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
239 template <typename LstmLayerType>
240 struct DummyLstmLayer
244 typename LstmLayerType::DescriptorType desc;
245 desc.m_CifgEnabled = false;
247 m_Layer = dummyGraph.AddLayer<LstmLayerType>(armnn::LstmDescriptor(), "");
248 m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
249 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
250 m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
251 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
252 m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
253 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
254 m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
255 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
256 m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
257 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
258 m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
259 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
260 m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
261 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
262 m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
263 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
264 m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
265 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
267 m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
268 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
269 m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
270 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
271 m_Layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
272 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
273 m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
274 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
279 dummyGraph.EraseLayer(m_Layer);
282 armnn::LstmLayer* m_Layer;
286 struct DummyLayer<armnn::LstmLayer>
287 : public DummyLstmLayer<armnn::LstmLayer>
292 struct DummyLayer<armnn::QuantizedLstmLayer, void>
296 m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");
298 m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
299 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
300 m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
301 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
302 m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
303 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
304 m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
305 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
307 m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
308 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
309 m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
310 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
311 m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
312 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
313 m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
314 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
316 m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
317 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
318 m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
319 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
320 m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
321 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
322 m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
323 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
328 dummyGraph.EraseLayer(m_Layer);
331 armnn::QuantizedLstmLayer* m_Layer;
335 struct DummyLayer<armnn::FullyConnectedLayer>
339 armnn::FullyConnectedLayer::DescriptorType desc;
340 m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
341 m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
342 armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
347 dummyGraph.EraseLayer(m_Layer);
350 armnn::FullyConnectedLayer* m_Layer;
353 // Tag for giving LayerType entries a unique strong type each.
354 template<armnn::LayerType>
// Defines a LayerTypePolicy specialization: maps a LayerType enum entry to
// its layer class, descriptor type, queue descriptor, printable name, and a
// helper that asks the factory to build a dummy workload for it.
// Fix: declare `QueueDesc desc;` — it was used in Create##name(desc, info)
// without any visible declaration.
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (the name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)

// Layer policy primary template; only the specializations generated by the
// DECLARE_LAYER_POLICY_* macros below are ever instantiated.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
// Every entry in the armnn::LayerType enum must be accounted for below.
// _1_PARAM: layer constructor takes only a name; _2_PARAM: descriptor + name;
// _CUSTOM_PARAM: some other constructor argument type (e.g. LayerBindingId).
DECLARE_LAYER_POLICY_1_PARAM(Abs)
DECLARE_LAYER_POLICY_2_PARAM(Activation)
DECLARE_LAYER_POLICY_1_PARAM(Addition)
DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)
DECLARE_LAYER_POLICY_2_PARAM(Concat)
DECLARE_LAYER_POLICY_1_PARAM(Constant)
DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)
DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)
DECLARE_LAYER_POLICY_1_PARAM(MemCopy)
DECLARE_LAYER_POLICY_1_PARAM(MemImport)
DECLARE_LAYER_POLICY_1_PARAM(Debug)
DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)
DECLARE_LAYER_POLICY_1_PARAM(Dequantize)
DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)
DECLARE_LAYER_POLICY_1_PARAM(Equal)
DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)
DECLARE_LAYER_POLICY_1_PARAM(Floor)
DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
DECLARE_LAYER_POLICY_1_PARAM(Gather)
DECLARE_LAYER_POLICY_1_PARAM(Greater)
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
DECLARE_LAYER_POLICY_2_PARAM(Lstm)
DECLARE_LAYER_POLICY_1_PARAM(Maximum)
DECLARE_LAYER_POLICY_2_PARAM(Mean)
DECLARE_LAYER_POLICY_1_PARAM(Merge)
DECLARE_LAYER_POLICY_1_PARAM(Minimum)
DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
DECLARE_LAYER_POLICY_2_PARAM(Normalization)
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)
DECLARE_LAYER_POLICY_2_PARAM(Pad)
DECLARE_LAYER_POLICY_1_PARAM(Quantize)
DECLARE_LAYER_POLICY_2_PARAM(Permute)
DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)
DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)
DECLARE_LAYER_POLICY_1_PARAM(Prelu)
DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)
DECLARE_LAYER_POLICY_1_PARAM(Division)
DECLARE_LAYER_POLICY_2_PARAM(Resize)
DECLARE_LAYER_POLICY_2_PARAM(Reshape)
DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)
DECLARE_LAYER_POLICY_2_PARAM(Softmax)
DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)
DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)
DECLARE_LAYER_POLICY_2_PARAM(Splitter)
DECLARE_LAYER_POLICY_2_PARAM(Stack)
DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
DECLARE_LAYER_POLICY_1_PARAM(Switch)
DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)
493 // Generic implementation to get the number of input slots for a given layer type;
494 template<armnn::LayerType Type>
495 unsigned int GetNumInputs(const armnn::Layer& layer)
497 return layer.GetNumInputSlots();
500 // Generic implementation to get the number of output slots for a given layer type;
501 template<armnn::LayerType Type>
502 unsigned int GetNumOutputs(const armnn::Layer& layer)
504 return layer.GetNumOutputSlots();
508 unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
510 boost::ignore_unused(layer);
514 // Tests that the IsLayerSupported() function returns the correct value.
515 // We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
516 // Returns true if expectations are met, otherwise returns false.
517 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
518 bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
520 using LayerPolicy = LayerTypePolicy<Type, DataType>;
521 using LayerType = typename LayerPolicy::Type;
522 using LayerDesc = typename LayerPolicy::Desc;
523 DummyLayer<LayerType, LayerDesc> layer;
525 unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
526 unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
528 // Make another dummy layer just to make IsLayerSupported have valid inputs.
529 DummyLayer<armnn::ConstantLayer, void> previousLayer;
530 // Set output of the previous layer to a dummy tensor.
531 armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
532 previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
533 // Connect all outputs of the previous layer to inputs of tested layer.
534 for (unsigned int i = 0; i < numIn; i++)
536 armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
537 armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
538 previousLayerOutputSlot.Connect(layerInputSlot);
540 // Set outputs of tested layer to a dummy tensor.
541 for (unsigned int i = 0; i < numOut; i++)
543 layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
546 std::string layerName = LayerPolicy::NameStr;
547 std::string reasonIfUnsupported;
548 if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
550 std::string errorMsg = " layer expected support but found none.";
553 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
554 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
557 catch(const armnn::InvalidArgumentException& e)
559 boost::ignore_unused(e);
560 // This is ok since we throw InvalidArgumentException when creating the dummy workload.
563 catch(const std::exception& e)
566 BOOST_TEST_ERROR(layerName << ": " << errorMsg);
571 errorMsg = "Unexpected error while testing support for ";
572 BOOST_TEST_ERROR(errorMsg << layerName);
578 std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
581 bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
582 BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
585 // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
586 // using parameters that make IsLayerSupported() return false should throw an
587 // InvalidArgumentException or UnimplementedException.
588 catch(const armnn::InvalidArgumentException& e)
590 boost::ignore_unused(e);
593 catch(const armnn::UnimplementedException& e)
595 boost::ignore_unused(e);
598 catch(const std::exception& e)
601 BOOST_TEST_ERROR(layerName << ": " << errorMsg);
606 errorMsg = "Unexpected error while testing support for ";
607 BOOST_TEST_ERROR(errorMsg << layerName);
613 // Helper function to compute the next type in the LayerType enum.
614 constexpr armnn::LayerType NextType(armnn::LayerType type)
616 return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
619 // Termination function for determining the end of the LayerType enumeration.
620 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
621 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
623 return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
626 // Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
627 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
628 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
630 bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
633 IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
634 (factory, Tag<NextType(Type)>());
637 // Helper function to pass through to the test framework.
638 template<typename FactoryType, armnn::DataType DataType>
639 bool IsLayerSupportedTests(FactoryType *factory)
641 return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
644 template<armnn::LayerType Type>
645 bool TestLayerTypeMatches()
647 using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
648 using LayerType = typename LayerPolicy::Type;
649 using LayerDesc = typename LayerPolicy::Desc;
650 DummyLayer<LayerType, LayerDesc> layer;
652 std::stringstream ss;
653 ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
654 bool v = Type == layer.m_Layer->GetType();
655 BOOST_CHECK_MESSAGE(v, ss.str());
659 template<armnn::LayerType Type>
660 bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
662 return TestLayerTypeMatches<Type>();
665 template<armnn::LayerType Type>
666 bool LayerTypeMatchesTestImpl(Tag<Type>)
668 return TestLayerTypeMatches<Type>() &&
669 LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
672 template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
673 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
676 LayerType* const layer = graph.AddLayer<LayerType>("LayerName");
678 armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
679 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
681 armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
682 armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);
684 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
685 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
686 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
687 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
689 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
694 template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
695 bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
698 static const std::vector<unsigned> axes = {1, 0};
699 armnn::MeanDescriptor desc(axes, false);
701 armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");
703 armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
704 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
706 armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
707 armnn::TensorInfo outputTensorInfo({2}, OutputDataType);
709 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
710 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
711 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
712 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
714 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
719 // Tests that IsMeanSupported fails when input tensor dimensions
720 // do not match output tensor dimensions when keepDims == true
721 template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
722 bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
725 static const std::vector<unsigned> axes = {};
726 // Set keepDims == true
727 armnn::MeanDescriptor desc(axes, true);
729 armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");
731 armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
732 armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
734 // Mismatching number of tensor dimensions
735 armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
736 armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);
738 input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
739 input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
740 layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
741 layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
743 bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);