2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
6 #include "CpuTensorHandle.hpp"
7 #include "WorkloadFactory.hpp"
11 #include <LayersFwd.hpp>
13 #include <armnn/Types.hpp>
14 #include <armnn/LayerSupport.hpp>
15 #include <armnn/ILayerSupport.hpp>
17 #include <backendsCommon/BackendRegistry.hpp>
18 #include <backendsCommon/WorkloadFactory.hpp>
19 #include <backendsCommon/IBackendInternal.hpp>
20 #include <backendsCommon/test/WorkloadTestUtils.hpp>
22 #include <boost/cast.hpp>
23 #include <boost/iterator/transform_iterator.hpp>
// Anonymous-namespace helper: returns a copy of 'info' whose element type is
// replaced by 'type', preserving the original shape and quantization
// scale/offset. Used below so layer-support queries can be asked "would this
// layer be supported if run at this data type" (e.g. Fp16 conversion checks).
// NOTE(review): as visible here this calls type.value() unconditionally; the
// empty-Optional branch is elided from this view and must return 'info'
// unchanged for the call sites below to be safe — confirm against the file.
34 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
41 return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
44 } // anonymous namespace
// Asks the given backend whether it can execute 'connectableLayer' with its
// current tensor configuration, optionally re-typed to 'dataType' (used to
// probe e.g. Fp16 support). Writes a human-readable failure reason into
// outReasonIfUnsupported. Returns false immediately for backends that were
// never registered with the BackendRegistry.
46 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
47 const IConnectableLayer& connectableLayer,
48 Optional<DataType> dataType,
49 std::string& outReasonIfUnsupported)
// Wrap the out-string so the ILayerSupport calls can write a reason into it.
51 Optional<std::string&> reason = outReasonIfUnsupported;
53 const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));
// Early-out with an explanatory message when the backend is not registered.
55 auto const& backendRegistry = BackendRegistryInstance();
56 if (!backendRegistry.IsBackendRegistered(backendId))
59 ss << connectableLayer.GetName() << " is not supported on " << backendId
60 << " because this backend is not registered.";
62 outReasonIfUnsupported = ss.str();
// Instantiate the backend and fetch its ILayerSupport implementation; all
// per-layer queries below are dispatched through this object.
66 auto backendFactory = backendRegistry.GetFactory(backendId);
67 auto backendObject = backendFactory();
68 auto layerSupportObject = backendObject->GetLayerSupport();
// Dispatch on the concrete layer type: each case gathers the relevant input,
// output and (where applicable) constant-weight TensorInfos, applies the
// dataType override, and queries the matching Is<Layer>Supported() function.
70 switch(layer.GetType())
74 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
75 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
76 result = layerSupportObject->IsAbsSupported(OverrideDataType(input, dataType),
77 OverrideDataType(output, dataType),
81 case LayerType::Activation:
83 auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
84 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
85 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
86 result = layerSupportObject->IsActivationSupported(
87 OverrideDataType(input, dataType),
88 OverrideDataType(output, dataType),
89 cLayer->GetParameters(),
93 case LayerType::Addition:
95 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
96 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
97 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
98 result = layerSupportObject->IsAdditionSupported(
99 OverrideDataType(input0, dataType),
100 OverrideDataType(input1, dataType),
101 OverrideDataType(output, dataType),
105 case LayerType::BatchNormalization:
107 auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
108 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
109 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
// Batch-norm statistics and scale/shift parameters are layer-owned constants.
110 const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
111 const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
112 const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
113 const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
114 result = layerSupportObject->IsBatchNormalizationSupported(
115 OverrideDataType(input, dataType),
116 OverrideDataType(output, dataType),
117 OverrideDataType(mean, dataType),
118 OverrideDataType(var, dataType),
119 OverrideDataType(beta, dataType),
120 OverrideDataType(gamma, dataType),
121 cLayer->GetParameters(),
125 case LayerType::BatchToSpaceNd:
127 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
128 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
129 auto cLayer = boost::polymorphic_downcast<const BatchToSpaceNdLayer*>(&layer);
131 result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
132 OverrideDataType(output, dataType),
133 cLayer->GetParameters(),
137 case LayerType::Constant:
// Constant layers have no inputs — only the output info is checked.
139 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
140 result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
143 case LayerType::ConvertFp16ToFp32:
// Conversion layers deliberately skip the dataType override: their whole
// purpose is a fixed Fp16<->Fp32 type change.
145 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
146 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
147 result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
150 case LayerType::ConvertFp32ToFp16:
152 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
153 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
154 result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
157 case LayerType::Convolution2d:
159 auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);
161 const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
163 const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
164 BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
166 const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
168 // Construct optional biases object based on the value of m_BiasEnabled
169 Optional<TensorInfo> biases;
170 if (descriptor.m_BiasEnabled)
// Bias type is derived from the (possibly overridden) weights type, e.g.
// Signed32 bias for quantized weights.
173 OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
176 result = layerSupportObject->IsConvolution2dSupported(
180 OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
185 case LayerType::Debug:
187 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
188 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
190 result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
191 OverrideDataType(output, dataType),
195 case LayerType::DepthwiseConvolution2d:
// Mirrors the Convolution2d case, but queries the depthwise-specific check.
197 auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer);
198 const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
200 const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
201 BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
203 const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
205 // Construct optional biases object based on the value of m_BiasEnabled
206 Optional<TensorInfo> biases;
207 if (descriptor.m_BiasEnabled)
210 OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
213 result = layerSupportObject->IsDepthwiseConvolutionSupported(
217 OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
222 case LayerType::Dequantize:
224 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
225 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
// Dequantize output is always checked as Float32 regardless of 'dataType':
// dequantization by definition produces a float tensor.
227 result = layerSupportObject->IsDequantizeSupported(OverrideDataType(input, dataType),
228 OverrideDataType(output, DataType::Float32),
232 case LayerType::DetectionPostProcess:
234 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
235 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
236 auto cLayer = boost::polymorphic_downcast<const DetectionPostProcessLayer*>(&layer);
237 const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
238 result = layerSupportObject->IsDetectionPostProcessSupported(input0,
244 case LayerType::Equal:
246 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
247 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
248 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
249 result = layerSupportObject->IsEqualSupported(OverrideDataType(input0, dataType),
250 OverrideDataType(input1, dataType),
251 OverrideDataType(output, dataType),
255 case LayerType::FakeQuantization:
257 auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
258 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
259 result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
260 cLayer->GetParameters(),
264 case LayerType::Floor:
266 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
267 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
268 result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
269 OverrideDataType(output, dataType),
273 case LayerType::FullyConnected:
275 auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
276 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
277 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
278 BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
// The support API always takes a bias TensorInfo; when biases are disabled a
// static dummy of the matching type is substituted (selected below).
281 const TensorInfo * biasInfoPtr = nullptr;
282 static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
283 static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
284 static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
286 const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
287 if (descriptor.m_BiasEnabled)
289 BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
290 biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
291 biasInfoPtr = &biasInfo;
295 // If biases are not enabled pass a dummy tensorinfo for the validation
296 switch(input.GetDataType())
298 case DataType::Float16:
300 biasInfoPtr = &dummyFloat16Bias;
303 case DataType::Float32:
305 biasInfoPtr = &dummyFloat32Bias;
// Quantized inputs (asymm8/symm16) use a Signed32 dummy bias, matching the
// bias type required for quantized fully-connected layers.
308 case DataType::QuantisedAsymm8:
309 case DataType::QuantisedSymm16:
311 biasInfoPtr = &dummyQA8Bias;
316 BOOST_ASSERT_MSG(false, "Unexpected bias type");
321 result = layerSupportObject->IsFullyConnectedSupported(
322 OverrideDataType(input, dataType),
323 OverrideDataType(output, dataType),
324 OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
330 case LayerType::Gather:
332 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
333 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
334 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
335 result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
337 OverrideDataType(output, dataType),
341 case LayerType::Input:
// Input layers have no input slots; their tensor is the first output slot.
343 const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
344 result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
347 case LayerType::L2Normalization:
349 auto cLayer = boost::polymorphic_downcast<const L2NormalizationLayer*>(&layer);
350 const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
352 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
353 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
355 result = layerSupportObject->IsL2NormalizationSupported(
356 OverrideDataType(input, dataType),
357 OverrideDataType(output, dataType),
362 case LayerType::Lstm:
364 auto cLayer = boost::polymorphic_downcast<const LstmLayer*>(&layer);
365 const LstmDescriptor& descriptor = cLayer->GetParameters();
// LSTM inputs: data, previous output state, previous cell state.
368 const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
370 const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
372 const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
// LSTM outputs: scratch buffer, output state, cell state, output.
375 const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
376 const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
377 const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
378 const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
// Mandatory basic weight/bias parameters.
381 const TensorInfo& inputToForgetWeights
382 = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
383 const TensorInfo& inputToCellWeights
384 = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
385 const TensorInfo& inputToOutputWeights
386 = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
387 const TensorInfo& recurrentToForgetWeights
388 = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
389 const TensorInfo& recurrentToCellWeights
390 = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
391 const TensorInfo& recurrentToOutputWeights
392 = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
393 const TensorInfo& forgetGateBias
394 = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
395 const TensorInfo& cellBias
396 = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
397 const TensorInfo& outputGateBias
398 = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
400 LstmInputParamsInfo paramsInfo;
402 paramsInfo.m_InputToForgetWeights = &inputToForgetWeights;
403 paramsInfo.m_InputToCellWeights = &inputToCellWeights;
404 paramsInfo.m_InputToOutputWeights = &inputToOutputWeights;
405 paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
406 paramsInfo.m_RecurrentToCellWeights = &recurrentToCellWeights;
407 paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
408 paramsInfo.m_ForgetGateBias = &forgetGateBias;
409 paramsInfo.m_CellBias = &cellBias;
410 paramsInfo.m_OutputGateBias = &outputGateBias;
413 // Optional parameters
// These locals back the optional paramsInfo pointers; they must outlive the
// IsLstmSupported call below, which is why they are declared up-front.
414 TensorInfo optInputToInputWeights;
415 TensorInfo optRecurrentToInputWeights;
416 TensorInfo optCellToInputWeights;
417 TensorInfo optInputGateBias;
418 TensorInfo optProjectionWeights;
419 TensorInfo optProjectionBias;
420 TensorInfo optCellToForgetWeights;
421 TensorInfo optCellToOutputWeights;
422 TensorInfo optInputLayerNormWeights;
423 TensorInfo optForgetLayerNormWeights;
424 TensorInfo optCellLayerNormWeights;
425 TensorInfo optOutputLayerNormWeights;
// CIFG disabled => the input gate and its weights/bias are present.
427 if(!descriptor.m_CifgEnabled)
429 optInputToInputWeights =
430 OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
431 paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
433 optRecurrentToInputWeights =
434 OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
435 paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
436 if (cLayer->m_CifgParameters.m_CellToInputWeights != nullptr)
438 optCellToInputWeights =
439 OverrideDataType(cLayer->m_CifgParameters.m_CellToInputWeights->GetTensorInfo(), dataType);
440 paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
443 OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
444 paramsInfo.m_InputGateBias = &optInputGateBias;
447 if(descriptor.m_ProjectionEnabled)
449 optProjectionWeights =
450 OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
451 paramsInfo.m_ProjectionWeights = &optProjectionWeights;
// Projection bias itself is optional even when projection is enabled.
452 if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
455 OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
456 paramsInfo.m_ProjectionBias = &optProjectionBias;
460 if(descriptor.m_PeepholeEnabled)
462 optCellToForgetWeights =
463 OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
464 paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
465 optCellToOutputWeights =
466 OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
467 paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
470 if(descriptor.m_LayerNormEnabled)
// Input layer-norm weights only exist when CIFG is disabled (input gate present).
472 if (!descriptor.m_CifgEnabled)
474 optInputLayerNormWeights = OverrideDataType(
475 cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
476 paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
479 optForgetLayerNormWeights = OverrideDataType(
480 cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
481 paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
483 optCellLayerNormWeights = OverrideDataType(
484 cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
485 paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
487 optOutputLayerNormWeights = OverrideDataType(
488 cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
489 paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
492 result = layerSupportObject->IsLstmSupported(
505 case LayerType::Maximum:
507 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
508 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
509 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
511 result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
512 OverrideDataType(input1, dataType),
513 OverrideDataType(output, dataType),
517 case LayerType::MemCopy:
519 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
520 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
522 result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
523 OverrideDataType(output, dataType),
527 case LayerType::MemImport:
529 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
530 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
532 result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
533 OverrideDataType(output, dataType),
537 case LayerType::Merge:
539 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
540 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
541 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
543 result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
544 OverrideDataType(input1, dataType),
545 OverrideDataType(output, dataType),
549 case LayerType::Concat:
551 auto cLayer = boost::polymorphic_downcast<const ConcatLayer*>(&layer);
553 // Get vector of all inputs.
554 auto getTensorInfo = [&dataType](const InputSlot& slot)
556 return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
// Materialize the (type-overridden) infos, then build a pointer vector over
// them, since IsConcatSupported takes a vector of const TensorInfo*.
558 auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
559 auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
560 std::vector<TensorInfo> inputs(beginI, endI);
562 auto getTensorInfoPtr = [](const TensorInfo& info)
566 auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
567 auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
568 std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
570 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
572 result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
577 case LayerType::Multiplication:
579 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
580 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
581 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
582 result = layerSupportObject->IsMultiplicationSupported(
583 OverrideDataType(input0, dataType),
584 OverrideDataType(input1, dataType),
585 OverrideDataType(output, dataType),
589 case LayerType::Normalization:
591 auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer);
592 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
593 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
594 result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
595 OverrideDataType(output, dataType),
596 cLayer->GetParameters(),
600 case LayerType::Output:
// Output layers have no output slots; their tensor is the first input slot.
602 const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
603 result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
606 case LayerType::Permute:
608 auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer);
609 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
610 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
611 result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
612 OverrideDataType(output, dataType),
613 cLayer->GetParameters(),
619 auto cLayer = boost::polymorphic_downcast<const PadLayer*>(&layer);
620 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
621 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
622 result = layerSupportObject->IsPadSupported(
623 OverrideDataType(input, dataType),
624 OverrideDataType(output, dataType),
625 cLayer->GetParameters(),
629 case LayerType::Pooling2d:
631 auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer);
632 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
633 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
634 result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
635 OverrideDataType(output, dataType),
636 cLayer->GetParameters(),
640 case LayerType::PreCompiled:
642 auto cLayer = boost::polymorphic_downcast<const PreCompiledLayer*>(&layer);
643 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
644 result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
645 cLayer->GetParameters(),
649 case LayerType::Quantize:
// No dataType override: quantize works on the tensors' actual types.
651 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
652 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
653 result = layerSupportObject->IsQuantizeSupported(input, output, reason);
656 case LayerType::QuantizedLstm:
658 auto cLayer = boost::polymorphic_downcast<const QuantizedLstmLayer*>(&layer);
// Inputs: data, previous cell state, previous output.
661 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
662 const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
663 const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
// Outputs: cell state, output.
666 const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
667 const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
669 // QuantizedLstm parameters
670 QuantizedLstmInputParamsInfo paramsInfo;
672 paramsInfo.m_InputToInputWeights =
673 &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
674 paramsInfo.m_InputToForgetWeights =
675 &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
676 paramsInfo.m_InputToCellWeights =
677 &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
678 paramsInfo.m_InputToOutputWeights =
679 &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
681 paramsInfo.m_RecurrentToInputWeights =
682 &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
683 paramsInfo.m_RecurrentToForgetWeights =
684 &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
685 paramsInfo.m_RecurrentToCellWeights =
686 &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
687 paramsInfo.m_RecurrentToOutputWeights =
688 &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
690 paramsInfo.m_InputGateBias =
691 &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
692 paramsInfo.m_ForgetGateBias =
693 &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
694 paramsInfo.m_CellBias =
695 &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
696 paramsInfo.m_OutputGateBias =
697 &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
699 result = layerSupportObject->IsQuantizedLstmSupported(input,
708 case LayerType::Division:
710 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
711 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
712 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
713 result = layerSupportObject->IsDivisionSupported(
714 OverrideDataType(input0, dataType),
715 OverrideDataType(input1, dataType),
716 OverrideDataType(output, dataType),
720 case LayerType::Reshape:
722 auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
723 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
724 result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
725 cLayer->GetParameters(),
729 case LayerType::Resize:
731 auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
732 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
733 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
734 result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
735 OverrideDataType(output, dataType),
736 cLayer->GetParameters(),
740 case LayerType::Rsqrt:
742 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
743 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
744 result = layerSupportObject->IsRsqrtSupported(OverrideDataType(input, dataType),
745 OverrideDataType(output, dataType),
749 case LayerType::Softmax:
751 auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
752 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
753 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
754 result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
755 OverrideDataType(output, dataType),
756 cLayer->GetParameters(),
760 case LayerType::SpaceToBatchNd:
762 auto cLayer = boost::polymorphic_downcast<const SpaceToBatchNdLayer*>(&layer);
763 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
764 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
765 result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
766 OverrideDataType(output, dataType),
767 cLayer->GetParameters(),
771 case LayerType::SpaceToDepth:
773 auto cLayer = boost::polymorphic_downcast<const SpaceToDepthLayer*>(&layer);
775 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
776 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
778 result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
779 OverrideDataType(output, dataType),
780 cLayer->GetParameters(),
784 case LayerType::Splitter:
786 auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
787 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
789 // Get vector of all outputs.
790 auto getTensorInfo = [&dataType](const OutputSlot& slot)
792 return OverrideDataType(slot.GetTensorInfo(), dataType);
794 auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
795 auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
796 std::vector<TensorInfo> outputs(beginI, endI);
// Splitter's support check takes reference_wrappers over the output infos.
798 const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
800 result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
802 cLayer->GetParameters(),
806 case LayerType::Stack:
// Same gather-inputs-then-pointers pattern as the Concat case above.
808 auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
810 // Get vector of all inputs.
811 auto getTensorInfo = [&dataType](const InputSlot& slot)
813 return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
815 auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
816 auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
817 std::vector<TensorInfo> inputs(beginI, endI);
819 auto getTensorInfoPtr = [](const TensorInfo& info)
823 auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
824 auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
825 std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
827 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
829 result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
833 case LayerType::StridedSlice:
835 auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
836 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
837 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
838 result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
839 OverrideDataType(output, dataType),
840 cLayer->GetParameters(),
844 case LayerType::Subtraction:
846 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
847 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
848 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
849 result = layerSupportObject->IsSubtractionSupported(
850 OverrideDataType(input0, dataType),
851 OverrideDataType(input1, dataType),
852 OverrideDataType(output, dataType),
856 case LayerType::Switch:
858 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
859 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
860 const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
861 const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
862 result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
863 OverrideDataType(input1, dataType),
864 OverrideDataType(output0, dataType),
865 OverrideDataType(output1, dataType),
869 case LayerType::Mean:
871 auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
872 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
873 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
874 result = layerSupportObject->IsMeanSupported(
875 OverrideDataType(input, dataType),
876 OverrideDataType(output, dataType),
877 cLayer->GetParameters(),
881 case LayerType::Minimum:
883 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
884 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
885 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
886 result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
887 OverrideDataType(input1, dataType),
888 OverrideDataType(output, dataType),
892 case LayerType::Greater:
894 const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
895 const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
896 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
// Comparison output is always checked as Boolean, never the override type.
897 result = layerSupportObject->IsGreaterSupported(OverrideDataType(input0, dataType),
898 OverrideDataType(input1, dataType),
899 OverrideDataType(output, DataType::Boolean),
903 case LayerType::Prelu:
905 const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
906 const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
907 const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
908 result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
909 OverrideDataType(alpha, dataType),
910 OverrideDataType(output, dataType),
914 case LayerType::TransposeConvolution2d:
916 auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);
918 const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
920 const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
922 const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();
// Biases are only passed when enabled, same pattern as Convolution2d.
924 Optional<TensorInfo> biases;
925 if (descriptor.m_BiasEnabled)
927 BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
928 biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
929 GetBiasTypeFromWeightsType(dataType));
932 BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
933 const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
935 result = layerSupportObject->IsTransposeConvolution2dSupported(input,
// Unknown layer type: assert in debug builds, report unsupported in release.
946 BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
947 reason.value() = "Unrecognised layer type";
955 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
956 Optional<DataType> dataType,
957 std::string& outReasonIfUnsupported)
959 auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
960 return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
963 // Default Implementations
964 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
965 const WorkloadInfo& info) const
967 return std::unique_ptr<IWorkload>();
970 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
971 const WorkloadInfo& info) const
973 return std::unique_ptr<IWorkload>();
976 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
977 const WorkloadInfo& info) const
979 return std::unique_ptr<IWorkload>();
982 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
983 const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
985 return std::unique_ptr<IWorkload>();
988 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
989 const WorkloadInfo& Info) const
991 return std::unique_ptr<IWorkload>();
994 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
995 const WorkloadInfo& info) const
997 return std::unique_ptr<IWorkload>();
1000 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
1001 const WorkloadInfo& info) const
1003 return std::unique_ptr<IWorkload>();
1006 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
1007 const WorkloadInfo& info) const
1009 return std::unique_ptr<IWorkload>();
1012 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
1013 const WorkloadInfo& info) const
1015 return std::unique_ptr<IWorkload>();
1018 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
1019 const WorkloadInfo& info) const
1021 return std::unique_ptr<IWorkload>();
1024 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
1025 const WorkloadInfo& info) const
1027 return std::unique_ptr<IWorkload>();
1030 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
1031 const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
1033 return std::unique_ptr<IWorkload>();
1036 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
1037 const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
1039 return std::unique_ptr<IWorkload>();
1042 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
1043 const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const
1045 return std::unique_ptr<IWorkload>();
1048 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
1049 const WorkloadInfo& info) const
1051 return std::unique_ptr<IWorkload>();
1054 std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
1055 const WorkloadInfo& Info) const
1057 return std::unique_ptr<IWorkload>();
1060 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
1061 const WorkloadInfo& info) const
1063 return std::unique_ptr<IWorkload>();
1066 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
1067 const WorkloadInfo& info) const
1069 return std::unique_ptr<IWorkload>();
1072 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
1073 const WorkloadInfo& info) const
1075 return std::unique_ptr<IWorkload>();
1078 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
1079 const WorkloadInfo& info) const
1081 return std::unique_ptr<IWorkload>();
1084 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
1085 const WorkloadInfo& info) const
1087 return std::unique_ptr<IWorkload>();
1090 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
1091 const WorkloadInfo& info) const
1093 return std::unique_ptr<IWorkload>();
1096 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
1097 const WorkloadInfo& info) const
1099 return std::unique_ptr<IWorkload>();
1102 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
1103 const WorkloadInfo& info) const
1105 return std::unique_ptr<IWorkload>();
1108 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
1109 const WorkloadInfo& Info) const
1111 return std::unique_ptr<IWorkload>();
1114 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
1115 const WorkloadInfo& info) const
1117 return std::unique_ptr<IWorkload>();
1120 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
1121 const WorkloadInfo& info) const
1123 return std::unique_ptr<IWorkload>();
1126 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& descriptor,
1127 const WorkloadInfo& info) const
1129 return std::unique_ptr<IWorkload>();
1132 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
1133 const WorkloadInfo& info) const
1135 return std::unique_ptr<IWorkload>();
1138 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
1139 const WorkloadInfo& info) const
1141 return std::unique_ptr<IWorkload>();
1144 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
1145 const WorkloadInfo& info) const
1147 return std::unique_ptr<IWorkload>();
1150 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
1151 const WorkloadInfo& info) const
1153 return std::unique_ptr<IWorkload>();
1156 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
1157 const WorkloadInfo& info) const
1159 return std::unique_ptr<IWorkload>();
1162 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
1163 const WorkloadInfo& Info) const
1165 return std::unique_ptr<IWorkload>();
1168 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
1169 const WorkloadInfo& info) const
1171 return std::unique_ptr<IWorkload>();
1174 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
1175 const WorkloadInfo& info) const
1177 return std::unique_ptr<IWorkload>();
1180 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
1181 const WorkloadInfo& info) const
1183 return std::unique_ptr<IWorkload>();
1186 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &descriptor,
1187 const WorkloadInfo &info) const
1189 return std::unique_ptr<IWorkload>();
1192 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
1193 const WorkloadInfo& Info) const
1195 return std::unique_ptr<IWorkload>();
1198 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
1199 const WorkloadInfo& info) const
1201 return std::unique_ptr<IWorkload>();
1204 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
1205 const WorkloadInfo& info) const
1207 return std::unique_ptr<IWorkload>();
1210 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
1211 const WorkloadInfo& info) const
1213 return std::unique_ptr<IWorkload>();
1216 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
1217 const WorkloadInfo& info) const
1219 return std::unique_ptr<IWorkload>();
1222 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
1223 const WorkloadInfo& info) const
1225 return std::unique_ptr<IWorkload>();
1228 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
1229 const WorkloadInfo& info) const
1231 return std::unique_ptr<IWorkload>();
1234 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
1235 const WorkloadInfo& info) const
1237 return std::unique_ptr<IWorkload>();
1240 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
1241 const WorkloadInfo& info) const
1243 return std::unique_ptr<IWorkload>();
1246 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
1247 const WorkloadInfo& info) const
1249 return std::unique_ptr<IWorkload>();
1252 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
1253 const WorkloadInfo& info) const
1255 return std::unique_ptr<IWorkload>();
1258 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
1259 const WorkloadInfo& Info) const
1261 return std::unique_ptr<IWorkload>();
1264 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
1265 const WorkloadInfo& info) const
1267 return std::unique_ptr<IWorkload>();
1270 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& descriptor,
1271 const WorkloadInfo& info) const
1273 return std::unique_ptr<IWorkload>();
1276 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
1277 const TransposeConvolution2dQueueDescriptor& descriptor,
1278 const WorkloadInfo& info) const
1280 return std::unique_ptr<IWorkload>();
} // namespace armnn