2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
7 #include <LayersFwd.hpp>
9 #include <armnn/Types.hpp>
10 #include <armnn/LayerSupport.hpp>
11 #include <armnn/ILayerSupport.hpp>
12 #include <armnn/BackendRegistry.hpp>
14 #include <backendsCommon/WorkloadFactory.hpp>
15 #include <armnn/backends/IBackendInternal.hpp>
16 #include <backendsCommon/CpuTensorHandle.hpp>
17 #include <backendsCommon/WorkloadFactory.hpp>
19 #include <backendsCommon/test/WorkloadTestUtils.hpp>
21 #include <boost/cast.hpp>
22 #include <boost/iterator/transform_iterator.hpp>
33 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
40 return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
43 } // anonymous namespace
45 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
46 const IConnectableLayer& connectableLayer,
47 Optional<DataType> dataType,
48 std::string& outReasonIfUnsupported)
50 Optional<std::string&> reason = outReasonIfUnsupported;
52 const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));
54 auto const& backendRegistry = BackendRegistryInstance();
55 if (!backendRegistry.IsBackendRegistered(backendId))
58 ss << connectableLayer.GetName() << " is not supported on " << backendId
59 << " because this backend is not registered.";
61 outReasonIfUnsupported = ss.str();
65 auto backendFactory = backendRegistry.GetFactory(backendId);
66 auto backendObject = backendFactory();
67 auto layerSupportObject = backendObject->GetLayerSupport();
69 switch(layer.GetType())
73 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
74 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
75 result = layerSupportObject->IsAbsSupported(OverrideDataType(input, dataType),
76 OverrideDataType(output, dataType),
80 case LayerType::Activation:
82 auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
83 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
84 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
85 result = layerSupportObject->IsActivationSupported(
86 OverrideDataType(input, dataType),
87 OverrideDataType(output, dataType),
88 cLayer->GetParameters(),
92 case LayerType::Addition:
94 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
95 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
96 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
97 result = layerSupportObject->IsAdditionSupported(
98 OverrideDataType(input0, dataType),
99 OverrideDataType(input1, dataType),
100 OverrideDataType(output, dataType),
104 case LayerType::ArgMinMax:
106 auto cLayer = boost::polymorphic_downcast<const ArgMinMaxLayer*>(&layer);
107 const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
109 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
110 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
111 result = layerSupportObject->IsArgMinMaxSupported(
112 OverrideDataType(input, dataType),
113 OverrideDataType(output, DataType::Signed32),
118 case LayerType::BatchNormalization:
120 auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
121 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
122 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
123 const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
124 const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
125 const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
126 const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
127 result = layerSupportObject->IsBatchNormalizationSupported(
128 OverrideDataType(input, dataType),
129 OverrideDataType(output, dataType),
130 OverrideDataType(mean, dataType),
131 OverrideDataType(var, dataType),
132 OverrideDataType(beta, dataType),
133 OverrideDataType(gamma, dataType),
134 cLayer->GetParameters(),
138 case LayerType::BatchToSpaceNd:
140 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
141 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
142 auto cLayer = boost::polymorphic_downcast<const BatchToSpaceNdLayer*>(&layer);
144 result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
145 OverrideDataType(output, dataType),
146 cLayer->GetParameters(),
150 case LayerType::Comparison:
152 auto cLayer = boost::polymorphic_downcast<const ComparisonLayer*>(&layer);
154 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
155 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
156 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
158 result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
159 OverrideDataType(input1, dataType),
160 OverrideDataType(output, DataType::Boolean),
161 cLayer->GetParameters(),
165 case LayerType::Constant:
167 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
168 result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
171 case LayerType::ConvertFp16ToFp32:
173 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
174 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
175 result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
178 case LayerType::ConvertFp32ToFp16:
180 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
181 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
182 result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
185 case LayerType::Convolution2d:
187 auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);
189 const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
191 const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
192 BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
194 const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
196 // Construct optional biases object based on the value of m_BiasEnabled
197 Optional<TensorInfo> biases;
198 if (descriptor.m_BiasEnabled)
201 OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
204 result = layerSupportObject->IsConvolution2dSupported(
208 OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
213 case LayerType::Debug:
215 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
216 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
218 result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
219 OverrideDataType(output, dataType),
223 case LayerType::DepthToSpace:
225 auto cLayer = boost::polymorphic_downcast<const DepthToSpaceLayer*>(&layer);
227 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
228 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
230 result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
231 OverrideDataType(output, dataType),
232 cLayer->GetParameters(),
236 case LayerType::DepthwiseConvolution2d:
238 auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer);
239 const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
241 const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
242 BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
244 const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
246 // Construct optional biases object based on the value of m_BiasEnabled
247 Optional<TensorInfo> biases;
248 if (descriptor.m_BiasEnabled)
251 OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
254 result = layerSupportObject->IsDepthwiseConvolutionSupported(
258 OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
263 case LayerType::Dequantize:
265 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
266 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
268 result = layerSupportObject->IsDequantizeSupported(input,
269 OverrideDataType(output, dataType),
273 case LayerType::DetectionPostProcess:
275 auto cLayer = boost::polymorphic_downcast<const DetectionPostProcessLayer*>(&layer);
276 const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
277 const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
278 const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();
280 const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
281 const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
282 const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
283 const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();
285 const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
286 result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,
297 case LayerType::FakeQuantization:
299 auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
300 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
301 result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
302 cLayer->GetParameters(),
306 case LayerType::Floor:
308 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
309 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
310 result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
311 OverrideDataType(output, dataType),
315 case LayerType::FullyConnected:
317 auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
318 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
319 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
320 BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
323 const TensorInfo * biasInfoPtr = nullptr;
324 static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
325 static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
326 static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
328 const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
329 if (descriptor.m_BiasEnabled)
331 BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
332 biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
333 biasInfoPtr = &biasInfo;
337 // If biases are not enabled pass a dummy tensorinfo for the validation
338 switch(input.GetDataType())
340 case DataType::Float16:
342 biasInfoPtr = &dummyFloat16Bias;
345 case DataType::Float32:
347 biasInfoPtr = &dummyFloat32Bias;
350 case DataType::QuantisedAsymm8:
351 case DataType::QuantisedSymm16:
353 biasInfoPtr = &dummyQA8Bias;
358 BOOST_ASSERT_MSG(false, "Unexpected bias type");
363 result = layerSupportObject->IsFullyConnectedSupported(
364 OverrideDataType(input, dataType),
365 OverrideDataType(output, dataType),
366 OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
372 case LayerType::Gather:
374 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
375 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
376 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
377 result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
379 OverrideDataType(output, dataType),
383 case LayerType::Input:
385 const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
386 result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
389 case LayerType::InstanceNormalization:
391 auto cLayer = boost::polymorphic_downcast<const InstanceNormalizationLayer*>(&layer);
392 const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();
394 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
395 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
397 result = layerSupportObject->IsInstanceNormalizationSupported(
398 OverrideDataType(input, dataType),
399 OverrideDataType(output, dataType),
404 case LayerType::L2Normalization:
406 auto cLayer = boost::polymorphic_downcast<const L2NormalizationLayer*>(&layer);
407 const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
409 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
410 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
412 result = layerSupportObject->IsL2NormalizationSupported(
413 OverrideDataType(input, dataType),
414 OverrideDataType(output, dataType),
419 case LayerType::LogSoftmax:
421 auto cLayer = boost::polymorphic_downcast<const LogSoftmaxLayer*>(&layer);
423 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
424 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
426 result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
427 OverrideDataType(output, dataType),
428 cLayer->GetParameters(),
432 case LayerType::Lstm:
434 auto cLayer = boost::polymorphic_downcast<const LstmLayer*>(&layer);
435 const LstmDescriptor& descriptor = cLayer->GetParameters();
438 const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
440 const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
442 const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
445 const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
446 const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
447 const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
448 const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
451 const TensorInfo& inputToForgetWeights
452 = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
453 const TensorInfo& inputToCellWeights
454 = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
455 const TensorInfo& inputToOutputWeights
456 = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
457 const TensorInfo& recurrentToForgetWeights
458 = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
459 const TensorInfo& recurrentToCellWeights
460 = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
461 const TensorInfo& recurrentToOutputWeights
462 = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
463 const TensorInfo& forgetGateBias
464 = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
465 const TensorInfo& cellBias
466 = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
467 const TensorInfo& outputGateBias
468 = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
470 LstmInputParamsInfo paramsInfo;
472 paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
473 paramsInfo.m_InputToCellWeights = &inputToCellWeights;
474 paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
475 paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
476 paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
477 paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
478 paramsInfo.m_ForgetGateBias = &forgetGateBias;
479 paramsInfo.m_CellBias = &cellBias;
480 paramsInfo.m_OutputGateBias = &outputGateBias;
483 // Optional parameters
484 TensorInfo optInputToInputWeights;
485 TensorInfo optRecurrentToInputWeights;
486 TensorInfo optCellToInputWeights;
487 TensorInfo optInputGateBias;
488 TensorInfo optProjectionWeights;
489 TensorInfo optProjectionBias;
490 TensorInfo optCellToForgetWeights;
491 TensorInfo optCellToOutputWeights;
492 TensorInfo optInputLayerNormWeights;
493 TensorInfo optForgetLayerNormWeights;
494 TensorInfo optCellLayerNormWeights;
495 TensorInfo optOutputLayerNormWeights;
497 if(!descriptor.m_CifgEnabled)
499 optInputToInputWeights =
500 OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
501 paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
503 optRecurrentToInputWeights =
504 OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
505 paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
506 if (cLayer->m_CifgParameters.m_CellToInputWeights != nullptr)
508 optCellToInputWeights =
509 OverrideDataType(cLayer->m_CifgParameters.m_CellToInputWeights->GetTensorInfo(), dataType);
510 paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
513 OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
514 paramsInfo.m_InputGateBias = &optInputGateBias;
517 if(descriptor.m_ProjectionEnabled)
519 optProjectionWeights =
520 OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
521 paramsInfo.m_ProjectionWeights = &optProjectionWeights;
522 if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
525 OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
526 paramsInfo.m_ProjectionBias = &optProjectionBias;
530 if(descriptor.m_PeepholeEnabled)
532 optCellToForgetWeights =
533 OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
534 paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
535 optCellToOutputWeights =
536 OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
537 paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
540 if(descriptor.m_LayerNormEnabled)
542 if (!descriptor.m_CifgEnabled)
544 optInputLayerNormWeights = OverrideDataType(
545 cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
546 paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
549 optForgetLayerNormWeights = OverrideDataType(
550 cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
551 paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
553 optCellLayerNormWeights = OverrideDataType(
554 cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
555 paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
557 optOutputLayerNormWeights = OverrideDataType(
558 cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
559 paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
562 result = layerSupportObject->IsLstmSupported(
575 case LayerType::Maximum:
577 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
578 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
579 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
581 result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
582 OverrideDataType(input1, dataType),
583 OverrideDataType(output, dataType),
587 case LayerType::MemCopy:
589 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
590 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
592 result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
593 OverrideDataType(output, dataType),
597 case LayerType::MemImport:
599 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
600 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
602 result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
603 OverrideDataType(output, dataType),
607 case LayerType::Merge:
609 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
610 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
611 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
613 result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
614 OverrideDataType(input1, dataType),
615 OverrideDataType(output, dataType),
619 case LayerType::Concat:
621 auto cLayer = boost::polymorphic_downcast<const ConcatLayer*>(&layer);
623 // Get vector of all inputs.
624 auto getTensorInfo = [&dataType](const InputSlot& slot)
626 return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
628 auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
629 auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
630 std::vector<TensorInfo> inputs(beginI, endI);
632 auto getTensorInfoPtr = [](const TensorInfo& info)
636 auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
637 auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
638 std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
640 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
642 result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
647 case LayerType::Multiplication:
649 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
650 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
651 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
652 result = layerSupportObject->IsMultiplicationSupported(
653 OverrideDataType(input0, dataType),
654 OverrideDataType(input1, dataType),
655 OverrideDataType(output, dataType),
659 case LayerType::Normalization:
661 auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer);
662 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
663 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
664 result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
665 OverrideDataType(output, dataType),
666 cLayer->GetParameters(),
670 case LayerType::Output:
672 const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
673 result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
676 case LayerType::Permute:
678 auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer);
679 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
680 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
681 result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
682 OverrideDataType(output, dataType),
683 cLayer->GetParameters(),
689 auto cLayer = boost::polymorphic_downcast<const PadLayer*>(&layer);
690 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
691 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
692 result = layerSupportObject->IsPadSupported(
693 OverrideDataType(input, dataType),
694 OverrideDataType(output, dataType),
695 cLayer->GetParameters(),
699 case LayerType::Pooling2d:
701 auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer);
702 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
703 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
704 result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
705 OverrideDataType(output, dataType),
706 cLayer->GetParameters(),
710 case LayerType::PreCompiled:
712 auto cLayer = boost::polymorphic_downcast<const PreCompiledLayer*>(&layer);
713 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
714 result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
715 cLayer->GetParameters(),
719 case LayerType::Quantize:
721 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
722 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
723 result = layerSupportObject->IsQuantizeSupported(input, output, reason);
726 case LayerType::QuantizedLstm:
728 auto cLayer = boost::polymorphic_downcast<const QuantizedLstmLayer*>(&layer);
731 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
732 const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
733 const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
736 const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
737 const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
739 // QuantizedLstm parameters
740 QuantizedLstmInputParamsInfo paramsInfo;
742 paramsInfo.m_InputToInputWeights =
743 &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
744 paramsInfo.m_InputToForgetWeights =
745 &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
746 paramsInfo.m_InputToCellWeights =
747 &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
748 paramsInfo.m_InputToOutputWeights =
749 &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
751 paramsInfo.m_RecurrentToInputWeights =
752 &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
753 paramsInfo.m_RecurrentToForgetWeights =
754 &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
755 paramsInfo.m_RecurrentToCellWeights =
756 &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
757 paramsInfo.m_RecurrentToOutputWeights =
758 &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
760 paramsInfo.m_InputGateBias =
761 &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
762 paramsInfo.m_ForgetGateBias =
763 &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
764 paramsInfo.m_CellBias =
765 &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
766 paramsInfo.m_OutputGateBias =
767 &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
769 result = layerSupportObject->IsQuantizedLstmSupported(input,
778 case LayerType::Division:
780 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
781 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
782 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
783 result = layerSupportObject->IsDivisionSupported(
784 OverrideDataType(input0, dataType),
785 OverrideDataType(input1, dataType),
786 OverrideDataType(output, dataType),
790 case LayerType::Reshape:
792 auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
793 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
794 result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
795 cLayer->GetParameters(),
799 case LayerType::Resize:
801 auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
802 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
803 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
804 result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
805 OverrideDataType(output, dataType),
806 cLayer->GetParameters(),
810 case LayerType::Rsqrt:
812 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
813 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
814 result = layerSupportObject->IsRsqrtSupported(OverrideDataType(input, dataType),
815 OverrideDataType(output, dataType),
819 case LayerType::Slice:
821 auto cLayer = boost::polymorphic_downcast<const SliceLayer*>(&layer);
823 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
824 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
826 result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
827 OverrideDataType(output, dataType),
828 cLayer->GetParameters(),
832 case LayerType::Softmax:
834 auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
835 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
836 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
837 result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
838 OverrideDataType(output, dataType),
839 cLayer->GetParameters(),
843 case LayerType::SpaceToBatchNd:
845 auto cLayer = boost::polymorphic_downcast<const SpaceToBatchNdLayer*>(&layer);
846 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
847 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
848 result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
849 OverrideDataType(output, dataType),
850 cLayer->GetParameters(),
854 case LayerType::SpaceToDepth:
856 auto cLayer = boost::polymorphic_downcast<const SpaceToDepthLayer*>(&layer);
858 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
859 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
861 result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
862 OverrideDataType(output, dataType),
863 cLayer->GetParameters(),
867 case LayerType::Splitter:
869 auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
870 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
872 // Get vector of all outputs.
873 auto getTensorInfo = [&dataType](const OutputSlot& slot)
875 return OverrideDataType(slot.GetTensorInfo(), dataType);
877 auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
878 auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
879 std::vector<TensorInfo> outputs(beginI, endI);
881 const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
883 result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
885 cLayer->GetParameters(),
889 case LayerType::Stack:
891 auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
893 // Get vector of all inputs.
894 auto getTensorInfo = [&dataType](const InputSlot& slot)
896 return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
898 auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
899 auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
900 std::vector<TensorInfo> inputs(beginI, endI);
902 auto getTensorInfoPtr = [](const TensorInfo& info)
906 auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
907 auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
908 std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
910 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
912 result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
916 case LayerType::StandIn:
918 auto cLayer = boost::polymorphic_downcast<const StandInLayer*>(&layer);
920 // Get vector of all inputs.
921 auto getTensorInfoIn = [&dataType](const InputSlot& slot)
923 return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
925 auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
927 return OverrideDataType(slot.GetTensorInfo(), dataType);
929 auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
930 auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
931 std::vector<TensorInfo> inputs(beginI, endI);
933 auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
934 auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
935 std::vector<TensorInfo> outputs(beginO, endO);
938 auto getTensorInfoPtr = [](const TensorInfo& info)
942 auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
943 auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
944 std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
946 auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
947 auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
948 std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
951 result = layerSupportObject->IsStandInSupported(inputPtrs,
953 cLayer->GetParameters(),
957 case LayerType::StridedSlice:
959 auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
960 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
961 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
962 result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
963 OverrideDataType(output, dataType),
964 cLayer->GetParameters(),
968 case LayerType::Subtraction:
970 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
971 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
972 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
973 result = layerSupportObject->IsSubtractionSupported(
974 OverrideDataType(input0, dataType),
975 OverrideDataType(input1, dataType),
976 OverrideDataType(output, dataType),
980 case LayerType::Switch:
982 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
983 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
984 const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
985 const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
986 result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
987 OverrideDataType(input1, dataType),
988 OverrideDataType(output0, dataType),
989 OverrideDataType(output1, dataType),
993 case LayerType::Mean:
995 auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
996 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
997 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
998 result = layerSupportObject->IsMeanSupported(
999 OverrideDataType(input, dataType),
1000 OverrideDataType(output, dataType),
1001 cLayer->GetParameters(),
1005 case LayerType::Minimum:
1007 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1008 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1009 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1010 result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
1011 OverrideDataType(input1, dataType),
1012 OverrideDataType(output, dataType),
1016 case LayerType::Prelu:
1018 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
1019 const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
1020 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
1021 result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
1022 OverrideDataType(alpha, dataType),
1023 OverrideDataType(output, dataType),
1027 case LayerType::TransposeConvolution2d:
1029 auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);
1031 const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
1033 const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
1035 const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
1037 Optional<TensorInfo> biases;
1038 if (descriptor.m_BiasEnabled)
1040 BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
1041 biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
1042 GetBiasTypeFromWeightsType(dataType));
1045 BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
1046 const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
1048 result = layerSupportObject->IsTransposeConvolution2dSupported(input,
1059 BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
1060 reason.value() = "Unrecognised layer type";
1068 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
1069 Optional<DataType> dataType,
1070 std::string& outReasonIfUnsupported)
1072 auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
1073 return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
1076 // Default Implementations
1077 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
1078 const WorkloadInfo& /*info*/) const
1080 return std::unique_ptr<IWorkload>();
1083 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
1084 const WorkloadInfo& /*info*/) const
1086 return std::unique_ptr<IWorkload>();
1089 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
1090 const WorkloadInfo& /*info*/) const
1092 return std::unique_ptr<IWorkload>();
1095 std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
1096 const WorkloadInfo& /*info*/) const
1098 return std::unique_ptr<IWorkload>();
1101 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
1102 const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1104 return std::unique_ptr<IWorkload>();
1107 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
1108 const WorkloadInfo& /*Info*/) const
1110 return std::unique_ptr<IWorkload>();
1113 std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
1114 const WorkloadInfo& /*info*/) const
1116 return std::unique_ptr<IWorkload>();
1119 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
1120 const WorkloadInfo& /*info*/) const
1122 return std::unique_ptr<IWorkload>();
1125 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
1126 const WorkloadInfo& /*info*/) const
1128 return std::unique_ptr<IWorkload>();
1131 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
1132 const WorkloadInfo& /*info*/) const
1134 return std::unique_ptr<IWorkload>();
1137 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
1138 const WorkloadInfo& /*info*/) const
1140 return std::unique_ptr<IWorkload>();
1143 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
1144 const WorkloadInfo& /*info*/) const
1146 return std::unique_ptr<IWorkload>();
1149 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
1150 const WorkloadInfo& /*info*/) const
1152 return std::unique_ptr<IWorkload>();
1155 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
1156 const WorkloadInfo& /*info*/) const
1158 return std::unique_ptr<IWorkload>();
1161 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
1162 const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1164 return std::unique_ptr<IWorkload>();
1167 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
1168 const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1170 return std::unique_ptr<IWorkload>();
1173 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
1174 const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
1176 return std::unique_ptr<IWorkload>();
1179 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
1180 const WorkloadInfo& /*info*/) const
1182 return std::unique_ptr<IWorkload>();
1185 std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
1186 const WorkloadInfo& /*Info*/) const
1188 return std::unique_ptr<IWorkload>();
1191 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
1192 const WorkloadInfo& /*info*/) const
1194 return std::unique_ptr<IWorkload>();
1197 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
1198 const WorkloadInfo& /*info*/) const
1200 return std::unique_ptr<IWorkload>();
1203 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
1204 const WorkloadInfo& /*info*/) const
1206 return std::unique_ptr<IWorkload>();
1209 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
1210 const WorkloadInfo& /*info*/) const
1212 return std::unique_ptr<IWorkload>();
1215 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
1216 const WorkloadInfo& /*info*/) const
1218 return std::unique_ptr<IWorkload>();
1221 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
1222 const InstanceNormalizationQueueDescriptor& /*descriptor*/,
1223 const WorkloadInfo& /*info*/) const
1225 return std::unique_ptr<IWorkload>();
1228 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
1229 const WorkloadInfo& /*info*/) const
1231 return std::unique_ptr<IWorkload>();
1234 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
1235 const WorkloadInfo& /*info*/) const
1237 return std::unique_ptr<IWorkload>();
1240 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
1241 const WorkloadInfo& /*info*/) const
1243 return std::unique_ptr<IWorkload>();
1246 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
1247 const WorkloadInfo& /*info*/) const
1249 return std::unique_ptr<IWorkload>();
1252 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
1253 const WorkloadInfo& /*Info*/) const
1255 return std::unique_ptr<IWorkload>();
1258 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
1259 const WorkloadInfo& /*info*/) const
1261 return std::unique_ptr<IWorkload>();
1264 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
1265 const WorkloadInfo& /*info*/) const
1267 return std::unique_ptr<IWorkload>();
1270 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
1271 const WorkloadInfo& /*info*/) const
1273 return std::unique_ptr<IWorkload>();
1276 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
1277 const WorkloadInfo& /*info*/) const
1279 return std::unique_ptr<IWorkload>();
1282 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
1283 const WorkloadInfo& /*info*/) const
1285 return std::unique_ptr<IWorkload>();
1288 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
1289 const WorkloadInfo& /*info*/) const
1291 return std::unique_ptr<IWorkload>();
1294 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
1295 const WorkloadInfo& /*info*/) const
1297 return std::unique_ptr<IWorkload>();
1300 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
1301 const WorkloadInfo& /*info*/) const
1303 return std::unique_ptr<IWorkload>();
1306 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
1307 const WorkloadInfo& /*Info*/) const
1309 return std::unique_ptr<IWorkload>();
1312 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
1313 const WorkloadInfo&/**/ /*info*/) const
1315 return std::unique_ptr<IWorkload>();
1318 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
1319 const WorkloadInfo& /*info*/) const
1321 return std::unique_ptr<IWorkload>();
1324 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
1325 const WorkloadInfo& /*info*/) const
1327 return std::unique_ptr<IWorkload>();
1330 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
1331 const WorkloadInfo &/*info*/) const
1333 return std::unique_ptr<IWorkload>();
1336 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
1337 const WorkloadInfo& /*Info*/) const
1339 return std::unique_ptr<IWorkload>();
1342 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
1343 const WorkloadInfo& /*info*/) const
1345 return std::unique_ptr<IWorkload>();
1348 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
1349 const WorkloadInfo& /*info*/) const
1351 return std::unique_ptr<IWorkload>();
1354 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
1355 const WorkloadInfo& /*info*/) const
1357 return std::unique_ptr<IWorkload>();
1360 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
1361 const WorkloadInfo& /*info*/) const
1363 return std::unique_ptr<IWorkload>();
1366 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
1367 const WorkloadInfo& /*info*/) const
1369 return std::unique_ptr<IWorkload>();
1372 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
1373 const WorkloadInfo& /*info*/) const
1375 return std::unique_ptr<IWorkload>();
1378 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
1379 const WorkloadInfo& /*info*/) const
1381 return std::unique_ptr<IWorkload>();
1384 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
1385 const WorkloadInfo& /*info*/) const
1387 return std::unique_ptr<IWorkload>();
1390 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
1391 const WorkloadInfo& /*info*/) const
1393 return std::unique_ptr<IWorkload>();
1396 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
1397 const WorkloadInfo& /*info*/) const
1399 return std::unique_ptr<IWorkload>();
1402 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
1403 const WorkloadInfo& /*info*/) const
1405 return std::unique_ptr<IWorkload>();
1408 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
1409 const WorkloadInfo& /*info*/) const
1411 return std::unique_ptr<IWorkload>();
1414 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
1415 const WorkloadInfo& /*info*/) const
1417 return std::unique_ptr<IWorkload>();
1420 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
1421 const WorkloadInfo& /*info*/) const
1423 return std::unique_ptr<IWorkload>();
1426 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
1427 const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
1428 const WorkloadInfo& /*info*/) const
1430 return std::unique_ptr<IWorkload>();
} // namespace armnn