IVGCVSW-4449 Add missing QLstm nullptr checks
platform/upstream/armnn.git: src/backends/backendsCommon/WorkloadFactory.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Layer.hpp>
#include <LayersFwd.hpp>

#include <armnn/Types.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <boost/iterator/transform_iterator.hpp>

#include <cstring>
#include <sstream>

namespace armnn
{

namespace
{

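// Returns the given TensorInfo unchanged when no override type is supplied; otherwise a copy
// with the data type replaced, carrying over the shape and quantization scale/offset.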
const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
    if (!type)
    {
        return info;
    }

    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
}

} // anonymous namespace

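// Queries the registered backend's ILayerSupport implementation to determine whether the given
// layer is supported, optionally overriding the tensor data types used for the check.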
bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                        const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
    Optional<std::string&> reason = outReasonIfUnsupported;
    bool result;
    const Layer& layer = *(PolymorphicDowncast<const Layer*>(&connectableLayer));

    auto const& backendRegistry = BackendRegistryInstance();
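    // Fail early if the requested backend has not been registered.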
    if (!backendRegistry.IsBackendRegistered(backendId))
    {
        std::stringstream ss;
        ss << connectableLayer.GetName() << " is not supported on " << backendId
           << " because this backend is not registered.";

        outReasonIfUnsupported = ss.str();
        return false;
    }

    auto backendFactory = backendRegistry.GetFactory(backendId);
    auto backendObject = backendFactory();
    auto layerSupportObject = backendObject->GetLayerSupport();

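    // Dispatch on the layer type: gather the relevant tensor infos and parameters,
    // then forward them to the backend's corresponding IsXxxSupported query.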
    switch(layer.GetType())
    {
        case LayerType::Activation:
        {
            auto cLayer = PolymorphicDowncast<const ActivationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsActivationSupported(
                                           OverrideDataType(input, dataType),
                                           OverrideDataType(output, dataType),
                                           cLayer->GetParameters(),
                                           reason);
            break;
        }
        case LayerType::Addition:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsAdditionSupported(
                                        OverrideDataType(input0, dataType),
                                        OverrideDataType(input1, dataType),
                                        OverrideDataType(output, dataType),
                                        reason);
            break;
        }
        case LayerType::ArgMinMax:
        {
            auto cLayer = PolymorphicDowncast<const ArgMinMaxLayer*>(&layer);
            const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();

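            // The output holds indices, so it is checked as Signed32 regardless of the override.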
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsArgMinMaxSupported(
                    OverrideDataType(input, dataType),
                    OverrideDataType(output, DataType::Signed32),
                    descriptor,
                    reason);
            break;
        }
        case LayerType::BatchNormalization:
        {
            auto cLayer = PolymorphicDowncast<const BatchNormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
            const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
            const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
            const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
            result = layerSupportObject->IsBatchNormalizationSupported(
                                                   OverrideDataType(input, dataType),
                                                   OverrideDataType(output, dataType),
                                                   OverrideDataType(mean, dataType),
                                                   OverrideDataType(var, dataType),
                                                   OverrideDataType(beta, dataType),
                                                   OverrideDataType(gamma, dataType),
                                                   cLayer->GetParameters(),
                                                   reason);
            break;
        }
        case LayerType::BatchToSpaceNd:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = PolymorphicDowncast<const BatchToSpaceNdLayer*>(&layer);

            result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
                                                                   reason);
            break;
        }
        case LayerType::Comparison:
        {
            auto cLayer = PolymorphicDowncast<const ComparisonLayer*>(&layer);

            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

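            // Comparison layers always produce a Boolean output, so that override is fixed.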
            result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
                                                               OverrideDataType(input1, dataType),
                                                               OverrideDataType(output, DataType::Boolean),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Constant:
        {
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::ConvertBf16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertBf16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp16ToFp32:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToBf16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp32ToBf16Supported(input, output, reason);
            break;
        }
        case LayerType::ConvertFp32ToFp16:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
            break;
        }
        case LayerType::Convolution2d:
        {
            auto cLayer = PolymorphicDowncast<const Convolution2dLayer*>(&layer);

            const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            const Convolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases =
                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject->IsConvolution2dSupported(
                                              input,
                                              output,
                                              descriptor,
                                              OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                              biases,
                                              reason);
            break;
        }
        case LayerType::Debug:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::DepthToSpace:
        {
            auto cLayer = PolymorphicDowncast<const DepthToSpaceLayer*>(&layer);

            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const DepthwiseConvolution2dLayer*>(&layer);
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
            {
                biases =
                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
            }

            result = layerSupportObject->IsDepthwiseConvolutionSupported(
                                                     input,
                                                     output,
                                                     descriptor,
                                                     OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                                     biases,
                                                     reason);
            break;
        }
        case LayerType::Dequantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsDequantizeSupported(input,
                                                               OverrideDataType(output, dataType),
                                                               reason);
            break;
        }
        case LayerType::DetectionPostProcess:
        {
            auto cLayer = PolymorphicDowncast<const DetectionPostProcessLayer*>(&layer);
            const TensorInfo& boxEncodings = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& scores = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& anchors = cLayer->m_Anchors->GetTensorInfo();

            const TensorInfo& detectionBoxes = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& detectionClasses = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& detectionScores = layer.GetOutputSlot(2).GetTensorInfo();
            const TensorInfo& numDetections = layer.GetOutputSlot(3).GetTensorInfo();

            const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
            result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,
                                                                         scores,
                                                                         anchors,
                                                                         detectionBoxes,
                                                                         detectionClasses,
                                                                         detectionScores,
                                                                         numDetections,
                                                                         descriptor,
                                                                         reason);
            break;
        }
        case LayerType::ElementwiseUnary:
        {
            auto cLayer = PolymorphicDowncast<const ElementwiseUnaryLayer*>(&layer);

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
                                                                     OverrideDataType(output, dataType),
                                                                     cLayer->GetParameters(),
                                                                     reason);
            break;
        }
        case LayerType::FakeQuantization:
        {
            auto cLayer = PolymorphicDowncast<const FakeQuantizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
                                                                     cLayer->GetParameters(),
                                                                     reason);
            break;
        }
        case LayerType::Floor:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::FullyConnected:
        {
            auto cLayer = PolymorphicDowncast<const FullyConnectedLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);

            TensorInfo biasInfo;
            const TensorInfo * biasInfoPtr = nullptr;
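            // Dummy bias infos (one per input data type) stand in during validation when the layer has no bias.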
            static const TensorInfo dummyBFloat16Bias(TensorShape({1,1,1,1}), DataType::BFloat16);
            static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
            static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
            static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);

            const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
            if (descriptor.m_BiasEnabled)
            {
                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
                biasInfoPtr = &biasInfo;
            }
            else
            {
                // If biases are not enabled pass a dummy tensorinfo for the validation
                switch(input.GetDataType())
                {
                    case DataType::BFloat16:
                    {
                        biasInfoPtr = &dummyBFloat16Bias;
                        break;
                    }
                    case DataType::Float16:
                    {
                        biasInfoPtr = &dummyFloat16Bias;
                        break;
                    }
                    case DataType::Float32:
                    {
                        biasInfoPtr = &dummyFloat32Bias;
                        break;
                    }
                    case DataType::QAsymmU8:
                    case DataType::QAsymmS8:
                    case DataType::QSymmS8:
                    case DataType::QSymmS16:
                    {
                        biasInfoPtr = &dummyQA8Bias;
                        break;
                    }
                    default:
                    {
                        ARMNN_ASSERT_MSG(false, "Unexpected bias type");
                    }
                }
            }

            result = layerSupportObject->IsFullyConnectedSupported(
                                               OverrideDataType(input, dataType),
                                               OverrideDataType(output, dataType),
                                               OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
                                               *biasInfoPtr,
                                               descriptor,
                                               reason);
            break;
        }
        case LayerType::Gather:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
                                                           input1,
                                                           OverrideDataType(output, dataType),
                                                           reason);
            break;
        }
        case LayerType::Input:
        {
            const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
            break;
        }
        case LayerType::InstanceNormalization:
        {
            auto cLayer = PolymorphicDowncast<const InstanceNormalizationLayer*>(&layer);
            const InstanceNormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsInstanceNormalizationSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                descriptor,
                reason);
            break;
        }
        case LayerType::L2Normalization:
        {
            auto cLayer = PolymorphicDowncast<const L2NormalizationLayer*>(&layer);
            const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsL2NormalizationSupported(
                                                OverrideDataType(input, dataType),
                                                OverrideDataType(output, dataType),
                                                descriptor,
                                                reason);
            break;
        }
        case LayerType::LogSoftmax:
        {
            auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);

            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
                                                               OverrideDataType(output, dataType),
                                                               cLayer->GetParameters(),
                                                               reason);
            break;
        }
        case LayerType::Lstm:
        {
            auto cLayer = PolymorphicDowncast<const LstmLayer*>(&layer);
            const LstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
                                                               dataType);
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
                                                             dataType);
            // All outputs
            const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);

            // Basic parameters
            const TensorInfo& inputToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                    = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                    = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

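            // Mandatory (basic) parameters first; the optional sets below depend on the descriptor flags.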
            LstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToForgetWeights     = &inputToForgetWeights;
            paramsInfo.m_InputToCellWeights       = &inputToCellWeights;
            paramsInfo.m_InputToOutputWeights     = &inputToOutputWeights;
            paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
            paramsInfo.m_RecurrentToCellWeights   = &recurrentToCellWeights;
            paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
            paramsInfo.m_ForgetGateBias           = &forgetGateBias;
            paramsInfo.m_CellBias                 = &cellBias;
            paramsInfo.m_OutputGateBias           = &outputGateBias;

            // Optional parameters
            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;
            TensorInfo optInputLayerNormWeights;
            TensorInfo optForgetLayerNormWeights;
            TensorInfo optCellLayerNormWeights;
            TensorInfo optOutputLayerNormWeights;

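            // With CIFG disabled the input gate is present, so its weights and bias must be supplied.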
            if(!descriptor.m_CifgEnabled)
            {
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
                optInputGateBias =
                       OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                paramsInfo.m_InputGateBias = &optInputGateBias;
            }

            if(descriptor.m_ProjectionEnabled)
            {
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionWeights = &optProjectionWeights;
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    optProjectionBias =
                        OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    paramsInfo.m_ProjectionBias = &optProjectionBias;
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if(!descriptor.m_CifgEnabled)
                {
                    optCellToInputWeights =
                            OverrideDataType(cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo(),
                                             dataType);
                    paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
                }
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    optInputLayerNormWeights = OverrideDataType(
                            cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                    paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
                }

                optForgetLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

                optCellLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

                optOutputLayerNormWeights = OverrideDataType(
                        cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
            }

            result = layerSupportObject->IsLstmSupported(
                                     input,
                                     outputStateIn,
                                     cellStateIn,
                                     scratchBuffer,
                                     outputStateOut,
                                     cellStateOut,
                                     output,
                                     descriptor,
                                     paramsInfo,
                                     reason);
            break;
        }
        case LayerType::Maximum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::MemCopy:
        {
            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::MemImport:
        {
            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              reason);
            break;
        }
        case LayerType::Merge:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
                                                          OverrideDataType(input1, dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::Concat:
        {
            auto cLayer = PolymorphicDowncast<const ConcatLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

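            // Convert to the vector-of-pointers form expected by IsConcatSupported.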
            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };
            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::Multiplication:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMultiplicationSupported(
                                               OverrideDataType(input0, dataType),
                                               OverrideDataType(input1, dataType),
                                               OverrideDataType(output, dataType),
                                               reason);
            break;
        }
        case LayerType::Normalization:
        {
            auto cLayer = PolymorphicDowncast<const NormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  cLayer->GetParameters(),
                                                                  reason);
            break;
        }
        case LayerType::Output:
        {
            const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
            break;
        }
        case LayerType::Permute:
        {
            auto cLayer = PolymorphicDowncast<const PermuteLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Pad:
        {
            auto cLayer = PolymorphicDowncast<const PadLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPadSupported(
                                    OverrideDataType(input, dataType),
                                    OverrideDataType(output, dataType),
                                    cLayer->GetParameters(),
                                    reason);
            break;
        }
        case LayerType::Pooling2d:
        {
            auto cLayer = PolymorphicDowncast<const Pooling2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::PreCompiled:
        {
            auto cLayer = PolymorphicDowncast<const PreCompiledLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
                                                                cLayer->GetParameters(),
                                                                reason);
            break;
        }
        case LayerType::Quantize:
        {
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsQuantizeSupported(input, output, reason);
            break;
        }
        case LayerType::QLstm:
        {
            auto cLayer = PolymorphicDowncast<const QLstmLayer*>(&layer);
            const QLstmDescriptor& descriptor = cLayer->GetParameters();

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& outputStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& cellStateOut = layer.GetOutputSlot(1).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(2).GetTensorInfo();

            // Lstm parameters
            LstmInputParamsInfo paramsInfo;

            // Basic parameters
            paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights   = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToForgetWeights =
                    &cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights   =
                    &cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                    &cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_ForgetGateBias = &cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias       = &cLayer->m_BasicParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias = &cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo();

            if(!descriptor.m_CifgEnabled)
            {
                paramsInfo.m_InputToInputWeights = &cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo();
                paramsInfo.m_RecurrentToInputWeights =
                        &cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo();
                paramsInfo.m_InputGateBias = &cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo();
            }

            if(descriptor.m_ProjectionEnabled)
            {
                paramsInfo.m_ProjectionWeights = &cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo();

                // Projection bias is optional even if projection is enabled
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                {
                    paramsInfo.m_ProjectionBias = &cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo();
                }
            }

            if(descriptor.m_PeepholeEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_CellToInputWeights =
                            &cLayer->m_PeepholeParameters.m_CellToInputWeights->GetTensorInfo();
                }

                paramsInfo.m_CellToForgetWeights =
                        &cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo();
                paramsInfo.m_CellToOutputWeights = &cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo();
            }

            if(descriptor.m_LayerNormEnabled)
            {
                if (!descriptor.m_CifgEnabled)
                {
                    paramsInfo.m_InputLayerNormWeights =
                            &cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo();
                }

                paramsInfo.m_ForgetLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo();
                paramsInfo.m_CellLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo();
                paramsInfo.m_OutputLayerNormWeights =
                        &cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo();
            }

            result = layerSupportObject->IsQLstmSupported(input,
                                                          previousOutputIn,
                                                          previousCellStateIn,
                                                          outputStateOut,
                                                          cellStateOut,
                                                          output,
                                                          descriptor,
                                                          paramsInfo,
                                                          reason);
            break;
        }
        case LayerType::QuantizedLstm:
        {
            auto cLayer = PolymorphicDowncast<const QuantizedLstmLayer*>(&layer);

            // Inputs
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();

            // Outputs
            const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();

            // QuantizedLstm parameters
            QuantizedLstmInputParamsInfo paramsInfo;

            paramsInfo.m_InputToInputWeights      =
                    &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
            paramsInfo.m_InputToForgetWeights     =
                    &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
            paramsInfo.m_InputToCellWeights       =
                    &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
            paramsInfo.m_InputToOutputWeights     =
                    &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();

            paramsInfo.m_RecurrentToInputWeights  =
                    &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToForgetWeights =
                    &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToCellWeights   =
                    &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
            paramsInfo.m_RecurrentToOutputWeights =
                    &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();

            paramsInfo.m_InputGateBias            =
                    &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
            paramsInfo.m_ForgetGateBias           =
                    &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
            paramsInfo.m_CellBias                 =
                    &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
            paramsInfo.m_OutputGateBias           =
                    &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();

            result = layerSupportObject->IsQuantizedLstmSupported(input,
                                                                  previousCellStateIn,
                                                                  previousOutputIn,
                                                                  cellStateOut,
                                                                  output,
                                                                  paramsInfo,
                                                                  reason);
            break;
        }
        case LayerType::Division:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsDivisionSupported(
                                         OverrideDataType(input0, dataType),
                                         OverrideDataType(input1, dataType),
                                         OverrideDataType(output, dataType),
                                         reason);
            break;
        }
        case LayerType::Reshape:
        {
            auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::Resize:
        {
            auto cLayer = PolymorphicDowncast<const ResizeLayer*>(&layer);
            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
                                                           reason);
            break;
        }
        case LayerType::Slice:
        {
            auto cLayer = PolymorphicDowncast<const SliceLayer*>(&layer);

            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),
                                                          reason);
            break;
        }
        case LayerType::Softmax:
        {
            auto cLayer = PolymorphicDowncast<const SoftmaxLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::SpaceToBatchNd:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToBatchNdLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
                                                                   reason);
            break;
        }
        case LayerType::SpaceToDepth:
        {
            auto cLayer = PolymorphicDowncast<const SpaceToDepthLayer*>(&layer);

            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Splitter:
        {
            auto cLayer = PolymorphicDowncast<const SplitterLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();

            // Get vector of all outputs.
            auto getTensorInfo = [&dataType](const OutputSlot& slot)
            {
                return OverrideDataType(slot.GetTensorInfo(), dataType);
            };
            auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> outputs(beginI, endI);

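            // IsSplitterSupported takes the output infos as reference_wrappers rather than pointers.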
            const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());

            result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
                                                             outputPtrs,
                                                             cLayer->GetParameters(),
                                                             reason);
            break;
        }
        case LayerType::Stack:
        {
            auto cLayer = PolymorphicDowncast<const StackLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

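            // IsStackSupported expects the inputs as a vector of const TensorInfo*,
            // so convert the collected infos into pointers.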
            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };
            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);

            break;
        }
        case LayerType::StandIn:
        {
            auto cLayer = PolymorphicDowncast<const StandInLayer*>(&layer);
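            // StandIn layers are placeholders for operations ArmNN does not model
            // natively, so whether they are supported is decided entirely by the backend.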

            // Get vector of all inputs.
            auto getTensorInfoIn = [&dataType](const InputSlot& slot)
                {
                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
                };
            auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
                {
                    return OverrideDataType(slot.GetTensorInfo(), dataType);
                };
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
            std::vector<TensorInfo> inputs(beginI, endI);

            auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
            auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
            std::vector<TensorInfo> outputs(beginO, endO);

            auto getTensorInfoPtr = [](const TensorInfo& info)
                {
                    return &info;
                };
            auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);

            auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
            auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);

            result = layerSupportObject->IsStandInSupported(inputPtrs,
                                                            outputPtrs,
                                                            cLayer->GetParameters(),
                                                            reason);
            break;
        }
        case LayerType::StridedSlice:
        {
            auto cLayer = PolymorphicDowncast<const StridedSliceLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
                                                                 reason);
            break;
        }
        case LayerType::Subtraction:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSubtractionSupported(
                                            OverrideDataType(input0, dataType),
                                            OverrideDataType(input1, dataType),
                                            OverrideDataType(output, dataType),
                                            reason);
            break;
        }
        case LayerType::Switch:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
            const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
            result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           OverrideDataType(output0, dataType),
                                                           OverrideDataType(output1, dataType),
                                                           reason);
            break;
        }
        case LayerType::Mean:
        {
            auto cLayer = PolymorphicDowncast<const MeanLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMeanSupported(
                                     OverrideDataType(input, dataType),
                                     OverrideDataType(output, dataType),
                                     cLayer->GetParameters(),
                                     reason);
            break;
        }
        case LayerType::Minimum:
        {
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
                                                            OverrideDataType(input1, dataType),
                                                            OverrideDataType(output, dataType),
                                                            reason);
            break;
        }
        case LayerType::Prelu:
        {
            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& alpha  = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPreluSupported(OverrideDataType(input,  dataType),
                                                          OverrideDataType(alpha,  dataType),
                                                          OverrideDataType(output, dataType),
                                                          reason);
            break;
        }
        case LayerType::Transpose:
        {
            auto cLayer = PolymorphicDowncast<const TransposeLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsTransposeSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
                                                              reason);
            break;
        }
        case LayerType::TransposeConvolution2d:
        {
            auto cLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(&layer);

            const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                       dataType);
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);

            const TransposeConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            Optional<TensorInfo> biases;
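            // The bias type is derived from the weights type rather than taken
            // directly (for example, quantized weights imply a Signed32 bias).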
            if (descriptor.m_BiasEnabled)
            {
                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                          GetBiasTypeFromWeightsType(dataType));
            }

            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
            const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);

            result = layerSupportObject->IsTransposeConvolution2dSupported(input,
                                                                           output,
                                                                           descriptor,
                                                                           weights,
                                                                           biases,
                                                                           reason);

            break;
        }
        default:
        {
            ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
            reason.value() = "Unrecognised layer type";
            result = false;
            break;
        }
    }
    return result;
}

bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
    auto layer = PolymorphicDowncast<const Layer*>(&connectableLayer);
    return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
}

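// An illustrative sketch (not part of this file) of querying layer support before
// assigning a layer to a backend; the layer pointer and the fallback handling here
// are hypothetical:
//
//     std::string reason;
//     if (!IWorkloadFactory::IsLayerSupported(Compute::CpuAcc, *layer,
//                                             DataType::Float32, reason))
//     {
//         // Try another backend, reporting 'reason' for the rejection.
//     }
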
// Default Implementations
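// Each default Create function below returns an empty std::unique_ptr, i.e. the base
// factory provides no workload for that layer type. Backend factories override only
// the Create functions they implement, so callers must check the returned pointer
// for nullptr before dereferencing it.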
std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
                                                       const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}
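
// A minimal sketch (not from this codebase) of how a backend factory overrides one
// of these defaults; MyWorkloadFactory and MyActivationWorkload are hypothetical:
//
//     std::unique_ptr<IWorkload> MyWorkloadFactory::CreateActivation(
//         const ActivationQueueDescriptor& descriptor, const WorkloadInfo& info) const
//     {
//         return std::make_unique<MyActivationWorkload>(descriptor, info);
//     }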

std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/,
                                                                     const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
                                                                     const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/,
                                                                     const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
                                                                     const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
    const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
    const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
                                                                    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
                                                                    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& /*descriptor*/,
    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
                                                                   const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
                                                              const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
                                                        const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
                                                       const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

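// QLstm has no default workload either: backends that do not implement it inherit
// this null-returning stub, so QLstm call sites must guard against nullptr.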
std::unique_ptr<IWorkload> IWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
                                                           const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
                                                                  const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
                                                         const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
                                                                const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
                                                               const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
                                                          const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& /*descriptor*/,
                                                             const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
    const WorkloadInfo& /*info*/) const
{
    return std::unique_ptr<IWorkload>();
}

} // namespace armnn