//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <Graph.hpp>

#include <backendsCommon/WorkloadFactory.hpp>

#include <boost/core/ignore_unused.hpp>

namespace
{
armnn::Graph dummyGraph;
// Make a dummy TensorInfo object.
template<armnn::DataType DataType>
armnn::TensorInfo MakeDummyTensorInfo()
{
    return armnn::TensorInfo({2,2,2,2}, DataType);
}
// Make a dummy WorkloadInfo using a dummy TensorInfo.
template<armnn::DataType DataType>
armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
{
    armnn::WorkloadInfo info;

    for (unsigned int i=0; i < numInputs; i++)
    {
        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    for (unsigned int o=0; o < numOutputs; o++)
    {
        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
    }

    return info;
}
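// Illustrative only (the values follow from MakeDummyTensorInfo above):
// MakeDummyWorkloadInfo<armnn::DataType::Float32>(2, 1) returns a WorkloadInfo
// holding two {2,2,2,2} Float32 entries in m_InputTensorInfos and one in
// m_OutputTensorInfos.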
// Template class to create a dummy layer (2 parameters).
template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
struct DummyLayer
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
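// Usage sketch (hypothetical, for documentation only): DummyLayer is an RAII
// wrapper, so the layer lives in dummyGraph exactly as long as the wrapper does.
//
//     {
//         DummyLayer<armnn::ActivationLayer> dummy; // added with ActivationDescriptor()
//         unsigned int n = dummy.m_Layer->GetNumInputSlots();
//     } // ~DummyLayer() erases the layer from dummyGraph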
// Template class to create a dummy layer (1 parameter).
template<typename LayerType>
struct DummyLayer<LayerType, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<LayerType>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    LayerType* m_Layer;
};
template<>
struct DummyLayer<armnn::BatchNormalizationLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchNormalizationLayer* m_Layer;
};
template<>
struct DummyLayer<armnn::BatchToSpaceNdLayer>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::BatchToSpaceNdLayer>(armnn::BatchToSpaceNdDescriptor(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::BatchToSpaceNdLayer* m_Layer;
};
template<>
struct DummyLayer<armnn::ConstantLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConstantLayer* m_Layer;
};
template<>
struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::InputLayer* m_Layer;
};
template<>
struct DummyLayer<armnn::ConcatLayer>
{
    DummyLayer()
    {
        armnn::OriginsDescriptor desc(2);
        m_Layer = dummyGraph.AddLayer<armnn::ConcatLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::ConcatLayer* m_Layer;
};
template<>
struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::OutputLayer* m_Layer;
};
template<>
struct DummyLayer<armnn::SplitterLayer>
{
    DummyLayer()
    {
        armnn::ViewsDescriptor desc(1);
        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::SplitterLayer* m_Layer;
};
// Helper to create dummy convolution-style layers that carry weights and a bias.
template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
    DummyConvolutionLayer()
    {
        typename ConvolutionLayerType::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyConvolutionLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    ConvolutionLayerType* m_Layer;
};
template<>
struct DummyLayer<armnn::Convolution2dLayer>
    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
{};

template<>
struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
{};

template<>
struct DummyLayer<armnn::TransposeConvolution2dLayer>
    : public DummyConvolutionLayer<armnn::TransposeConvolution2dLayer>
{};
template <typename LstmLayerType>
struct DummyLstmLayer
{
    DummyLstmLayer()
    {
        typename LstmLayerType::DescriptorType desc;
        desc.m_CifgEnabled = false;

        // Pass desc (CIFG disabled) rather than a default-constructed descriptor,
        // since the CIFG parameters are populated below.
        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, "");
        m_Layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_BasicParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));

        m_Layer->m_CifgParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_CellToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
        m_Layer->m_CifgParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLstmLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::LstmLayer* m_Layer;
};
template<>
struct DummyLayer<armnn::LstmLayer>
    : public DummyLstmLayer<armnn::LstmLayer>
{};
template<>
struct DummyLayer<armnn::QuantizedLstmLayer, void>
{
    DummyLayer()
    {
        m_Layer = dummyGraph.AddLayer<armnn::QuantizedLstmLayer>("");

        m_Layer->m_QuantizedLstmParameters.m_InputToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
        m_Layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
        m_Layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
        m_Layer->m_QuantizedLstmParameters.m_InputToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));

        m_Layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));
        m_Layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::QuantisedAsymm8));

        m_Layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
        m_Layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Signed32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::QuantizedLstmLayer* m_Layer;
};
template<>
struct DummyLayer<armnn::FullyConnectedLayer>
{
    DummyLayer()
    {
        armnn::FullyConnectedLayer::DescriptorType desc;
        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
    }

    ~DummyLayer()
    {
        dummyGraph.EraseLayer(m_Layer);
    }

    armnn::FullyConnectedLayer* m_Layer;
};
// Tag for giving LayerType entries a unique strong type each.
template<armnn::LayerType>
struct Tag{};
#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
template<armnn::DataType DataType> \
struct LayerTypePolicy<armnn::LayerType::name, DataType> \
{ \
    using Type = armnn::name##Layer; \
    using Desc = descType; \
    using QueueDesc = armnn::name##QueueDescriptor; \
    constexpr static const char* NameStr = #name; \
    \
    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
        unsigned int nIn, unsigned int nOut) \
    { \
        QueueDesc desc; \
        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
        return factory->Create##name(desc, info); \
    } \
};
// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 1 parameter (name).
#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)

// Define a layer policy specialization for use with the IsLayerSupported tests.
// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
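// For reference, DECLARE_LAYER_POLICY_2_PARAM(Activation) expands to roughly
// the following specialization (a sketch, not the literal expansion):
//
//     template<armnn::DataType DataType>
//     struct LayerTypePolicy<armnn::LayerType::Activation, DataType>
//     {
//         using Type = armnn::ActivationLayer;
//         using Desc = armnn::ActivationDescriptor;
//         using QueueDesc = armnn::ActivationQueueDescriptor;
//         constexpr static const char* NameStr = "Activation";
//         // ... MakeDummyWorkload calls factory->CreateActivation(desc, info)
//     };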
// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
// Every entry in the armnn::LayerType enum must be accounted for below.
DECLARE_LAYER_POLICY_1_PARAM(Abs)

DECLARE_LAYER_POLICY_2_PARAM(Activation)

DECLARE_LAYER_POLICY_1_PARAM(Addition)

DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)

DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)

DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)

DECLARE_LAYER_POLICY_2_PARAM(Concat)

DECLARE_LAYER_POLICY_1_PARAM(Constant)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)

DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)

DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)

DECLARE_LAYER_POLICY_1_PARAM(MemCopy)

DECLARE_LAYER_POLICY_1_PARAM(MemImport)

DECLARE_LAYER_POLICY_1_PARAM(Debug)

DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)

DECLARE_LAYER_POLICY_1_PARAM(Dequantize)

DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)

DECLARE_LAYER_POLICY_1_PARAM(Equal)

DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)

DECLARE_LAYER_POLICY_1_PARAM(Floor)

DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)

DECLARE_LAYER_POLICY_1_PARAM(Gather)

DECLARE_LAYER_POLICY_1_PARAM(Greater)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)

DECLARE_LAYER_POLICY_2_PARAM(Lstm)

DECLARE_LAYER_POLICY_1_PARAM(Maximum)

DECLARE_LAYER_POLICY_2_PARAM(Mean)

DECLARE_LAYER_POLICY_1_PARAM(Merge)

DECLARE_LAYER_POLICY_1_PARAM(Minimum)

DECLARE_LAYER_POLICY_1_PARAM(Multiplication)

DECLARE_LAYER_POLICY_2_PARAM(Normalization)

DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)

DECLARE_LAYER_POLICY_2_PARAM(Pad)

DECLARE_LAYER_POLICY_1_PARAM(Quantize)

DECLARE_LAYER_POLICY_2_PARAM(Permute)

DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)

DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)

DECLARE_LAYER_POLICY_1_PARAM(Prelu)

DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)

DECLARE_LAYER_POLICY_1_PARAM(Division)

DECLARE_LAYER_POLICY_2_PARAM(Resize)

DECLARE_LAYER_POLICY_2_PARAM(Reshape)

DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)

DECLARE_LAYER_POLICY_2_PARAM(Softmax)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)

DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)

DECLARE_LAYER_POLICY_2_PARAM(Splitter)

DECLARE_LAYER_POLICY_2_PARAM(Stack)

DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)

DECLARE_LAYER_POLICY_1_PARAM(Subtraction)

DECLARE_LAYER_POLICY_1_PARAM(Switch)

DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)
// Generic implementation to get the number of input slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumInputs(const armnn::Layer& layer)
{
    return layer.GetNumInputSlots();
}

// Generic implementation to get the number of output slots for a given layer type.
template<armnn::LayerType Type>
unsigned int GetNumOutputs(const armnn::Layer& layer)
{
    return layer.GetNumOutputSlots();
}

// The dummy Concat layer is created with OriginsDescriptor(2), so report two inputs.
template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
    boost::ignore_unused(layer);
    return 2;
}
// Tests that the IsLayerSupported() function returns the correct value.
// We determine the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
// Returns true if expectations are met, otherwise returns false.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
{
    using LayerPolicy = LayerTypePolicy<Type, DataType>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);

    // Make another dummy layer just to make IsLayerSupported have valid inputs.
    DummyLayer<armnn::ConstantLayer, void> previousLayer;
    // Set output of the previous layer to a dummy tensor.
    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    // Connect all outputs of the previous layer to inputs of tested layer.
    for (unsigned int i = 0; i < numIn; i++)
    {
        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
        previousLayerOutputSlot.Connect(layerInputSlot);
    }
    // Set outputs of tested layer to a dummy tensor.
    for (unsigned int i = 0; i < numOut; i++)
    {
        layer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
    }

    std::string layerName = LayerPolicy::NameStr;
    std::string reasonIfUnsupported;
    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
    {
        std::string errorMsg = " layer expected support but found none.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        catch(const armnn::InvalidArgumentException& e)
        {
            boost::ignore_unused(e);
            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
    else
    {
        std::string errorMsg = " layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
        try
        {
            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
            return retVal;
        }
        // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
        // using parameters that make IsLayerSupported() return false should throw an
        // InvalidArgumentException or UnimplementedException.
        catch(const armnn::InvalidArgumentException& e)
        {
            boost::ignore_unused(e);
            return true;
        }
        catch(const armnn::UnimplementedException& e)
        {
            boost::ignore_unused(e);
            return true;
        }
        catch(const std::exception& e)
        {
            errorMsg = e.what();
            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
            return false;
        }
        catch(...)
        {
            errorMsg = "Unexpected error while testing support for ";
            BOOST_TEST_ERROR(errorMsg << layerName);
            return false;
        }
    }
}
// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{
    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
}
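// NextType simply advances one enum value, e.g. stepping from FirstLayer to
// the entry that follows it; the static_assert below just pins that behaviour down.
static_assert(static_cast<int>(NextType(armnn::LayerType::FirstLayer)) ==
              static_cast<int>(armnn::LayerType::FirstLayer) + 1,
              "NextType is expected to advance the LayerType enum by exactly one entry");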
// Termination function for determining the end of the LayerType enumeration.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
}
// Recursive function to test an entry in the LayerType enum and then iterate on to the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
{
    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());

    return v &&
           IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
               (factory, Tag<NextType(Type)>());
}
// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
}
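// Typical invocation from a backend test (a sketch; RefWorkloadFactory and
// Float32 are example choices, not requirements of this helper):
//
//     armnn::RefWorkloadFactory factory;
//     bool ok = IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory);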
template<armnn::LayerType Type>
bool TestLayerTypeMatches()
{
    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
    using LayerType = typename LayerPolicy::Type;
    using LayerDesc = typename LayerPolicy::Desc;
    DummyLayer<LayerType, LayerDesc> layer;

    std::stringstream ss;
    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
    bool v = Type == layer.m_Layer->GetType();
    BOOST_CHECK_MESSAGE(v, ss.str());
    return v;
}
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
    return TestLayerTypeMatches<Type>();
}

template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
    return TestLayerTypeMatches<Type>() &&
           LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
}
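// Walking the whole enum starts the recursion at FirstLayer (a sketch, assuming
// the usual entry point used by the backend tests):
//
//     bool allMatch = LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(
//         Tag<armnn::LayerType::FirstLayer>());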
template<typename FactoryType, typename LayerType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {1, 0};
    armnn::MeanDescriptor desc(axes, false);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    armnn::TensorInfo inputTensorInfo({4, 3, 2}, InputDataType);
    armnn::TensorInfo outputTensorInfo({2}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}
// Tests that IsMeanSupported fails when input tensor dimensions
// do not match output tensor dimensions when keepDims == true.
template<typename FactoryType, armnn::DataType InputDataType, armnn::DataType OutputDataType>
bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
{
    armnn::Graph graph;
    static const std::vector<unsigned> axes = {};
    // Set keepDims == true.
    armnn::MeanDescriptor desc(axes, true);

    armnn::Layer* const layer = graph.AddLayer<armnn::MeanLayer>(desc, "LayerName");

    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");

    // Mismatching number of tensor dimensions.
    armnn::TensorInfo inputTensorInfo({1, 1, 1, 1}, InputDataType);
    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);

    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);

    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);

    return result;
}

} // anonymous namespace