9d081af8e984c99701464fffce2b46f04ef9ecbc
[platform/upstream/armnn.git] / src / backends / backendsCommon / WorkloadFactory.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "CpuTensorHandle.hpp"
7 #include "WorkloadFactory.hpp"
8
9
10 #include <Layer.hpp>
11 #include <LayersFwd.hpp>
12
13 #include <armnn/Types.hpp>
14 #include <armnn/LayerSupport.hpp>
15 #include <armnn/ILayerSupport.hpp>
16
17 #include <backendsCommon/BackendRegistry.hpp>
18 #include <backendsCommon/WorkloadFactory.hpp>
19 #include <backendsCommon/IBackendInternal.hpp>
20 #include <backendsCommon/test/WorkloadTestUtils.hpp>
21
22 #include <boost/cast.hpp>
23 #include <boost/iterator/transform_iterator.hpp>
24
25 #include <cstring>
26 #include <sstream>
27
28 namespace armnn
29 {
30
31 namespace
32 {
33
34 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
35 {
36     if (!type)
37     {
38         return info;
39     }
40
41     return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
42 }
43
44 } // anonymous namespace
45
46 bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
47                                         const IConnectableLayer& connectableLayer,
48                                         Optional<DataType> dataType,
49                                         std::string& outReasonIfUnsupported)
50 {
51     Optional<std::string&> reason = outReasonIfUnsupported;
52     bool result;
53     const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));
54
55     auto const& backendRegistry = BackendRegistryInstance();
56     if (!backendRegistry.IsBackendRegistered(backendId))
57     {
58         std::stringstream ss;
59         ss << connectableLayer.GetName() << " is not supported on " << backendId
60            << " because this backend is not registered.";
61
62         outReasonIfUnsupported = ss.str();
63         return false;
64     }
65
66     auto backendFactory = backendRegistry.GetFactory(backendId);
67     auto backendObject = backendFactory();
68     auto layerSupportObject = backendObject->GetLayerSupport();
69
70     switch(layer.GetType())
71     {
72         case LayerType::Abs:
73         {
74             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
75             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
76             result = layerSupportObject->IsAbsSupported(OverrideDataType(input, dataType),
77                                                         OverrideDataType(output, dataType),
78                                                         reason);
79             break;
80         }
81         case LayerType::Activation:
82         {
83             auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
84             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
85             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
86             result = layerSupportObject->IsActivationSupported(
87                                            OverrideDataType(input, dataType),
88                                            OverrideDataType(output, dataType),
89                                            cLayer->GetParameters(),
90                                            reason);
91             break;
92         }
93         case LayerType::Addition:
94         {
95             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
96             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
97             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
98             result = layerSupportObject->IsAdditionSupported(
99                                         OverrideDataType(input0, dataType),
100                                         OverrideDataType(input1, dataType),
101                                         OverrideDataType(output, dataType),
102                                         reason);
103             break;
104         }
105         case LayerType::BatchNormalization:
106         {
107             auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
108             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
109             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
110             const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
111             const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
112             const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
113             const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
114             result = layerSupportObject->IsBatchNormalizationSupported(
115                                                    OverrideDataType(input, dataType),
116                                                    OverrideDataType(output, dataType),
117                                                    OverrideDataType(mean, dataType),
118                                                    OverrideDataType(var, dataType),
119                                                    OverrideDataType(beta, dataType),
120                                                    OverrideDataType(gamma, dataType),
121                                                    cLayer->GetParameters(),
122                                                    reason);
123             break;
124         }
125         case LayerType::BatchToSpaceNd:
126         {
127             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
128             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
129             auto cLayer = boost::polymorphic_downcast<const BatchToSpaceNdLayer*>(&layer);
130
131             result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
132                                                                    OverrideDataType(output, dataType),
133                                                                    cLayer->GetParameters(),
134                                                                    reason);
135             break;
136         }
137         case LayerType::Constant:
138         {
139             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
140             result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
141             break;
142         }
143         case LayerType::ConvertFp16ToFp32:
144         {
145             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
146             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
147             result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
148             break;
149         }
150         case LayerType::ConvertFp32ToFp16:
151         {
152             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
153             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
154             result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
155             break;
156         }
157         case LayerType::Convolution2d:
158         {
159             auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);
160
161             const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
162                                                        dataType);
163             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
164             BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
165
166             const Convolution2dDescriptor& descriptor  = cLayer->GetParameters();
167
168             // Construct optional biases object based on the value of m_BiasEnabled
169             Optional<TensorInfo> biases;
170             if (descriptor.m_BiasEnabled)
171             {
172                 biases =
173                     OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
174             }
175
176             result = layerSupportObject->IsConvolution2dSupported(
177                                               input,
178                                               output,
179                                               descriptor,
180                                               OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
181                                               biases,
182                                               reason);
183             break;
184         }
185         case LayerType::Debug:
186         {
187             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
188             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
189
190             result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
191                                                           OverrideDataType(output, dataType),
192                                                           reason);
193             break;
194         }
195         case LayerType::DepthwiseConvolution2d:
196         {
197             auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer);
198             const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
199                                                        dataType);
200             const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
201             BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
202
203             const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
204
205             // Construct optional biases object based on the value of m_BiasEnabled
206             Optional<TensorInfo> biases;
207             if (descriptor.m_BiasEnabled)
208             {
209                 biases =
210                     OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
211             }
212
213             result = layerSupportObject->IsDepthwiseConvolutionSupported(
214                                                      input,
215                                                      output,
216                                                      descriptor,
217                                                      OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
218                                                      biases,
219                                                      reason);
220             break;
221         }
222         case LayerType::Dequantize:
223         {
224             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
225             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
226
227             result = layerSupportObject->IsDequantizeSupported(OverrideDataType(input, dataType),
228                                                                OverrideDataType(output, DataType::Float32),
229                                                                reason);
230             break;
231         }
232         case LayerType::DetectionPostProcess:
233         {
234             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
235             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
236             auto cLayer = boost::polymorphic_downcast<const DetectionPostProcessLayer*>(&layer);
237             const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
238             result = layerSupportObject->IsDetectionPostProcessSupported(input0,
239                                                                          input1,
240                                                                          descriptor,
241                                                                          reason);
242             break;
243         }
244         case LayerType::Equal:
245         {
246             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
247             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
248             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
249             result = layerSupportObject->IsEqualSupported(OverrideDataType(input0, dataType),
250                                                           OverrideDataType(input1, dataType),
251                                                           OverrideDataType(output, dataType),
252                                                           reason);
253             break;
254         }
255         case LayerType::FakeQuantization:
256         {
257             auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
258             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
259             result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
260                                                                      cLayer->GetParameters(),
261                                                                      reason);
262             break;
263         }
264         case LayerType::Floor:
265         {
266             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
267             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
268             result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
269                                                           OverrideDataType(output, dataType),
270                                                           reason);
271             break;
272         }
273         case LayerType::FullyConnected:
274         {
275             auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
276             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
277             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
278             BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
279
280             TensorInfo biasInfo;
281             const TensorInfo * biasInfoPtr = nullptr;
282             static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
283             static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
284             static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);
285
286             const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
287             if (descriptor.m_BiasEnabled)
288             {
289                 BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
290                 biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
291                 biasInfoPtr = &biasInfo;
292             }
293             else
294             {
295                 // If biases are not enabled pass a dummy tensorinfo for the validation
296                 switch(input.GetDataType())
297                 {
298                     case DataType::Float16:
299                     {
300                         biasInfoPtr = &dummyFloat16Bias;
301                         break;
302                     }
303                     case DataType::Float32:
304                     {
305                         biasInfoPtr = &dummyFloat32Bias;
306                         break;
307                     }
308                     case DataType::QuantisedAsymm8:
309                     case DataType::QuantisedSymm16:
310                     {
311                         biasInfoPtr = &dummyQA8Bias;
312                         break;
313                     }
314                     default:
315                     {
316                         BOOST_ASSERT_MSG(false, "Unexpected bias type");
317                     }
318                 }
319             }
320
321             result = layerSupportObject->IsFullyConnectedSupported(
322                                                OverrideDataType(input, dataType),
323                                                OverrideDataType(output, dataType),
324                                                OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
325                                                *biasInfoPtr,
326                                                descriptor,
327                                                reason);
328             break;
329         }
330         case LayerType::Gather:
331         {
332             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
333             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
334             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
335             result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
336                                                            input1,
337                                                            OverrideDataType(output, dataType),
338                                                            reason);
339             break;
340         }
341         case LayerType::Input:
342         {
343             const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
344             result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
345             break;
346         }
347         case LayerType::L2Normalization:
348         {
349             auto cLayer = boost::polymorphic_downcast<const L2NormalizationLayer*>(&layer);
350             const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();
351
352             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
353             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
354
355             result = layerSupportObject->IsL2NormalizationSupported(
356                                                 OverrideDataType(input, dataType),
357                                                 OverrideDataType(output, dataType),
358                                                 descriptor,
359                                                 reason);
360             break;
361         }
362         case LayerType::Lstm:
363         {
364             auto cLayer = boost::polymorphic_downcast<const LstmLayer*>(&layer);
365             const LstmDescriptor& descriptor = cLayer->GetParameters();
366
367             // All inputs.
368             const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
369                                                        dataType);
370             const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
371                                                                dataType);
372             const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
373                                                              dataType);
374             // All outputs
375             const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
376             const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
377             const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
378             const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);
379
380             // Basic parameters
381             const TensorInfo& inputToForgetWeights
382                     = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
383             const TensorInfo& inputToCellWeights
384                     = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
385             const TensorInfo& inputToOutputWeights
386                     = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
387             const TensorInfo& recurrentToForgetWeights
388                     = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
389             const TensorInfo& recurrentToCellWeights
390                     = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
391             const TensorInfo& recurrentToOutputWeights
392                     = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
393             const TensorInfo& forgetGateBias
394                     = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
395             const TensorInfo& cellBias
396                     = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
397             const TensorInfo& outputGateBias
398                     = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);
399
400             LstmInputParamsInfo paramsInfo;
401
402             paramsInfo.m_InputToForgetWeights     = &inputToForgetWeights;
403             paramsInfo.m_InputToCellWeights       = &inputToCellWeights;
404             paramsInfo.m_InputToOutputWeights     = &inputToOutputWeights;
405             paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
406             paramsInfo.m_RecurrentToCellWeights   = &recurrentToCellWeights;
407             paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
408             paramsInfo.m_ForgetGateBias           = &forgetGateBias;
409             paramsInfo.m_CellBias                 = &cellBias;
410             paramsInfo.m_OutputGateBias           = &outputGateBias;
411
412
413             // Optional parameters
414             TensorInfo optInputToInputWeights;
415             TensorInfo optRecurrentToInputWeights;
416             TensorInfo optCellToInputWeights;
417             TensorInfo optInputGateBias;
418             TensorInfo optProjectionWeights;
419             TensorInfo optProjectionBias;
420             TensorInfo optCellToForgetWeights;
421             TensorInfo optCellToOutputWeights;
422             TensorInfo optInputLayerNormWeights;
423             TensorInfo optForgetLayerNormWeights;
424             TensorInfo optCellLayerNormWeights;
425             TensorInfo optOutputLayerNormWeights;
426
427             if(!descriptor.m_CifgEnabled)
428             {
429                 optInputToInputWeights =
430                     OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
431                 paramsInfo.m_InputToInputWeights = &optInputToInputWeights;
432
433                 optRecurrentToInputWeights =
434                     OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
435                 paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;
436                 if (cLayer->m_CifgParameters.m_CellToInputWeights != nullptr)
437                 {
438                     optCellToInputWeights =
439                         OverrideDataType(cLayer->m_CifgParameters.m_CellToInputWeights->GetTensorInfo(), dataType);
440                     paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
441                 }
442                 optInputGateBias =
443                        OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
444                 paramsInfo.m_InputGateBias = &optInputGateBias;
445             }
446
447             if(descriptor.m_ProjectionEnabled)
448             {
449                 optProjectionWeights =
450                     OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
451                 paramsInfo.m_ProjectionWeights = &optProjectionWeights;
452                 if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
453                 {
454                     optProjectionBias =
455                         OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
456                     paramsInfo.m_ProjectionBias = &optProjectionBias;
457                 }
458             }
459
460             if(descriptor.m_PeepholeEnabled)
461             {
462                 optCellToForgetWeights =
463                     OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
464                 paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;
465                 optCellToOutputWeights =
466                     OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
467                 paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
468             }
469
470             if(descriptor.m_LayerNormEnabled)
471             {
472                 if (!descriptor.m_CifgEnabled)
473                 {
474                     optInputLayerNormWeights = OverrideDataType(
475                             cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
476                     paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
477                 }
478
479                 optForgetLayerNormWeights = OverrideDataType(
480                         cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
481                 paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;
482
483                 optCellLayerNormWeights = OverrideDataType(
484                         cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
485                 paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;
486
487                 optOutputLayerNormWeights = OverrideDataType(
488                         cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
489                 paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
490             }
491
492             result = layerSupportObject->IsLstmSupported(
493                                      input,
494                                      outputStateIn,
495                                      cellStateIn,
496                                      scratchBuffer,
497                                      outputStateOut,
498                                      cellStateOut,
499                                      output,
500                                      descriptor,
501                                      paramsInfo,
502                                      reason);
503             break;
504         }
505         case LayerType::Maximum:
506         {
507             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
508             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
509             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
510
511             result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
512                                                             OverrideDataType(input1, dataType),
513                                                             OverrideDataType(output, dataType),
514                                                             reason);
515             break;
516         }
517         case LayerType::MemCopy:
518         {
519             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
520             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
521
522             result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
523                                                             OverrideDataType(output, dataType),
524                                                             reason);
525             break;
526         }
527         case LayerType::MemImport:
528         {
529             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
530             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
531
532             result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
533                                                               OverrideDataType(output, dataType),
534                                                               reason);
535             break;
536         }
537         case LayerType::Merge:
538         {
539             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
540             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
541             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
542
543             result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
544                                                           OverrideDataType(input1, dataType),
545                                                           OverrideDataType(output, dataType),
546                                                           reason);
547             break;
548         }
549         case LayerType::Concat:
550         {
551             auto cLayer = boost::polymorphic_downcast<const ConcatLayer*>(&layer);
552
553             // Get vector of all inputs.
554             auto getTensorInfo = [&dataType](const InputSlot& slot)
555                 {
556                     return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
557                 };
558             auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
559             auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
560             std::vector<TensorInfo> inputs(beginI, endI);
561
562             auto getTensorInfoPtr = [](const TensorInfo& info)
563                 {
564                     return &info;
565                 };
566             auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
567             auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
568             std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
569
570             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
571
572             result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);
573
574
575             break;
576         }
577         case LayerType::Multiplication:
578         {
579             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
580             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
581             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
582             result = layerSupportObject->IsMultiplicationSupported(
583                                                OverrideDataType(input0, dataType),
584                                                OverrideDataType(input1, dataType),
585                                                OverrideDataType(output, dataType),
586                                                reason);
587             break;
588         }
589         case LayerType::Normalization:
590         {
591             auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer);
592             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
593             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
594             result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
595                                                                   OverrideDataType(output, dataType),
596                                                                   cLayer->GetParameters(),
597                                                                   reason);
598             break;
599         }
600         case LayerType::Output:
601         {
602             const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
603             result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
604             break;
605         }
606         case LayerType::Permute:
607         {
608             auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer);
609             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
610             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
611             result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
612                                                             OverrideDataType(output, dataType),
613                                                             cLayer->GetParameters(),
614                                                             reason);
615             break;
616         }
617         case LayerType::Pad:
618         {
619             auto cLayer = boost::polymorphic_downcast<const PadLayer*>(&layer);
620             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
621             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
622             result = layerSupportObject->IsPadSupported(
623                                     OverrideDataType(input, dataType),
624                                     OverrideDataType(output, dataType),
625                                     cLayer->GetParameters(),
626                                     reason);
627             break;
628         }
629         case LayerType::Pooling2d:
630         {
631             auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer);
632             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
633             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
634             result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
635                                                               OverrideDataType(output, dataType),
636                                                               cLayer->GetParameters(),
637                                                               reason);
638             break;
639         }
640         case LayerType::PreCompiled:
641         {
642             auto cLayer = boost::polymorphic_downcast<const PreCompiledLayer*>(&layer);
643             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
644             result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
645                                                                 cLayer->GetParameters(),
646                                                                 reason);
647             break;
648         }
649         case LayerType::Quantize:
650         {
651             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
652             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
653             result = layerSupportObject->IsQuantizeSupported(input, output, reason);
654             break;
655         }
656         case LayerType::QuantizedLstm:
657         {
658             auto cLayer = boost::polymorphic_downcast<const QuantizedLstmLayer*>(&layer);
659
660             // Inputs
661             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
662             const TensorInfo& previousCellStateIn = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
663             const TensorInfo& previousOutputIn = layer.GetInputSlot(2).GetConnection()->GetTensorInfo();
664
665             // Outputs
666             const TensorInfo& cellStateOut = layer.GetOutputSlot(0).GetTensorInfo();
667             const TensorInfo& output = layer.GetOutputSlot(1).GetTensorInfo();
668
669             // QuantizedLstm parameters
670             QuantizedLstmInputParamsInfo paramsInfo;
671
672             paramsInfo.m_InputToInputWeights      =
673                     &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
674             paramsInfo.m_InputToForgetWeights     =
675                     &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
676             paramsInfo.m_InputToCellWeights       =
677                     &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
678             paramsInfo.m_InputToOutputWeights     =
679                     &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();
680
681             paramsInfo.m_RecurrentToInputWeights  =
682                     &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
683             paramsInfo.m_RecurrentToForgetWeights =
684                     &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
685             paramsInfo.m_RecurrentToCellWeights   =
686                     &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
687             paramsInfo.m_RecurrentToOutputWeights =
688                     &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();
689
690             paramsInfo.m_InputGateBias            =
691                     &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
692             paramsInfo.m_ForgetGateBias           =
693                     &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
694             paramsInfo.m_CellBias                 =
695                     &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
696             paramsInfo.m_OutputGateBias           =
697                     &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();;
698
699             result = layerSupportObject->IsQuantizedLstmSupported(input,
700                                                                   previousCellStateIn,
701                                                                   previousOutputIn,
702                                                                   cellStateOut,
703                                                                   output,
704                                                                   paramsInfo,
705                                                                   reason);
706             break;
707         }
708         case LayerType::Division:
709         {
710             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
711             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
712             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
713             result = layerSupportObject->IsDivisionSupported(
714                                          OverrideDataType(input0, dataType),
715                                          OverrideDataType(input1, dataType),
716                                          OverrideDataType(output, dataType),
717                                          reason);
718             break;
719         }
720         case LayerType::Reshape:
721         {
722             auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
723             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
724             result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
725                                                             cLayer->GetParameters(),
726                                                             reason);
727             break;
728         }
729         case LayerType::Resize:
730         {
731             auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
732             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
733             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
734             result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
735                                                            OverrideDataType(output, dataType),
736                                                            cLayer->GetParameters(),
737                                                            reason);
738             break;
739         }
740         case LayerType::Rsqrt:
741         {
742             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
743             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
744             result = layerSupportObject->IsRsqrtSupported(OverrideDataType(input, dataType),
745                                                           OverrideDataType(output, dataType),
746                                                           reason);
747             break;
748         }
749         case LayerType::Softmax:
750         {
751             auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
752             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
753             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
754             result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
755                                                             OverrideDataType(output, dataType),
756                                                             cLayer->GetParameters(),
757                                                             reason);
758             break;
759         }
760         case LayerType::SpaceToBatchNd:
761         {
762             auto cLayer = boost::polymorphic_downcast<const SpaceToBatchNdLayer*>(&layer);
763             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
764             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
765             result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
766                                                                    OverrideDataType(output, dataType),
767                                                                    cLayer->GetParameters(),
768                                                                    reason);
769             break;
770         }
771         case LayerType::SpaceToDepth:
772         {
773             auto cLayer = boost::polymorphic_downcast<const SpaceToDepthLayer*>(&layer);
774
775             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
776             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
777
778             result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
779                                                                  OverrideDataType(output, dataType),
780                                                                  cLayer->GetParameters(),
781                                                                  reason);
782             break;
783         }
784         case LayerType::Splitter:
785         {
786             auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
787             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
788
789             // Get vector of all outputs.
790             auto getTensorInfo = [&dataType](const OutputSlot& slot)
791             {
792                 return OverrideDataType(slot.GetTensorInfo(), dataType);
793             };
794             auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
795             auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
796             std::vector<TensorInfo> outputs(beginI, endI);
797
798             const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
799
800             result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
801                                                              outputPtrs,
802                                                              cLayer->GetParameters(),
803                                                              reason);
804             break;
805         }
806         case LayerType::Stack:
807         {
808             auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
809
810             // Get vector of all inputs.
811             auto getTensorInfo = [&dataType](const InputSlot& slot)
812                 {
813                     return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
814                 };
815             auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
816             auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
817             std::vector<TensorInfo> inputs(beginI, endI);
818
819             auto getTensorInfoPtr = [](const TensorInfo& info)
820                 {
821                     return &info;
822                 };
823             auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
824             auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
825             std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
826
827             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
828
829             result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
830
831             break;
832         }
833         case LayerType::StridedSlice:
834         {
835             auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
836             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
837             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
838             result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
839                                                                  OverrideDataType(output, dataType),
840                                                                  cLayer->GetParameters(),
841                                                                  reason);
842             break;
843         }
844         case LayerType::Subtraction:
845         {
846             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
847             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
848             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
849             result = layerSupportObject->IsSubtractionSupported(
850                                             OverrideDataType(input0, dataType),
851                                             OverrideDataType(input1, dataType),
852                                             OverrideDataType(output, dataType),
853                                             reason);
854             break;
855         }
856         case LayerType::Switch:
857         {
858             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
859             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
860             const TensorInfo& output0 = layer.GetOutputSlot(0).GetTensorInfo();
861             const TensorInfo& output1 = layer.GetOutputSlot(1).GetTensorInfo();
862             result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
863                                                            OverrideDataType(input1, dataType),
864                                                            OverrideDataType(output0, dataType),
865                                                            OverrideDataType(output1, dataType),
866                                                            reason);
867             break;
868         }
869         case LayerType::Mean:
870         {
871             auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
872             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
873             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
874             result = layerSupportObject->IsMeanSupported(
875                                      OverrideDataType(input, dataType),
876                                      OverrideDataType(output, dataType),
877                                      cLayer->GetParameters(),
878                                      reason);
879             break;
880         }
881         case LayerType::Minimum:
882         {
883             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
884             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
885             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
886             result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
887                                                             OverrideDataType(input1, dataType),
888                                                             OverrideDataType(output, dataType),
889                                                             reason);
890             break;
891         }
892         case LayerType::Greater:
893         {
894             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
895             const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
896             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
897             result = layerSupportObject->IsGreaterSupported(OverrideDataType(input0, dataType),
898                                                             OverrideDataType(input1, dataType),
899                                                             OverrideDataType(output, DataType::Boolean),
900                                                             reason);
901             break;
902         }
903         case LayerType::Prelu:
904         {
905             const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
906             const TensorInfo& alpha  = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
907             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
908             result = layerSupportObject->IsPreluSupported(OverrideDataType(input,  dataType),
909                                                           OverrideDataType(alpha,  dataType),
910                                                           OverrideDataType(output, dataType),
911                                                           reason);
912             break;
913         }
914         case LayerType::TransposeConvolution2d:
915         {
916             auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);
917
918             const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
919                                                        dataType);
920             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
921
922             const TransposeConvolution2dDescriptor& descriptor  = cLayer->GetParameters();
923
924             Optional<TensorInfo> biases;
925             if (descriptor.m_BiasEnabled)
926             {
927                 BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
928                 biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
929                                           GetBiasTypeFromWeightsType(dataType));
930             }
931
932             BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
933             const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
934
935             result = layerSupportObject->IsTransposeConvolution2dSupported(input,
936                                                                            output,
937                                                                            descriptor,
938                                                                            weights,
939                                                                            biases,
940                                                                            reason);
941
942             break;
943         }
944         default:
945         {
946             BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
947             reason.value() = "Unrecognised layer type";
948             result = false;
949             break;
950         }
951     }
952     return result;
953 }
954
955 bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
956                                         Optional<DataType> dataType,
957                                         std::string& outReasonIfUnsupported)
958 {
959     auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
960     return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
961 }
962
963 // Default Implementations
964 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
965                                                        const WorkloadInfo& info) const
966 {
967     return std::unique_ptr<IWorkload>();
968 }
969
970 std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
971                                                               const WorkloadInfo& info) const
972 {
973     return std::unique_ptr<IWorkload>();
974 }
975
976 std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
977                                                             const WorkloadInfo& info) const
978 {
979     return std::unique_ptr<IWorkload>();
980 }
981
982 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
983     const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
984 {
985     return std::unique_ptr<IWorkload>();
986 }
987
988 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
989                                                                   const WorkloadInfo& Info) const
990 {
991     return std::unique_ptr<IWorkload>();
992 }
993
994 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
995                                                           const WorkloadInfo& info) const
996 {
997     return std::unique_ptr<IWorkload>();
998 }
999
1000 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
1001                                                             const WorkloadInfo& info) const
1002 {
1003     return std::unique_ptr<IWorkload>();
1004 }
1005
1006 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
1007                                                                      const WorkloadInfo& info) const
1008 {
1009     return std::unique_ptr<IWorkload>();
1010 }
1011
1012 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
1013                                                                      const WorkloadInfo& info) const
1014 {
1015     return std::unique_ptr<IWorkload>();
1016 }
1017
1018 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
1019                                                                  const WorkloadInfo& info) const
1020 {
1021     return std::unique_ptr<IWorkload>();
1022 }
1023
1024 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
1025                                                          const WorkloadInfo& info) const
1026 {
1027     return std::unique_ptr<IWorkload>();
1028 }
1029
1030 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
1031     const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
1032 {
1033     return std::unique_ptr<IWorkload>();
1034 }
1035
1036 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
1037     const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
1038 {
1039     return std::unique_ptr<IWorkload>();
1040 }
1041
1042 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
1043     const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const
1044 {
1045     return std::unique_ptr<IWorkload>();
1046 }
1047
1048 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
1049                                                             const WorkloadInfo& info) const
1050 {
1051     return std::unique_ptr<IWorkload>();
1052 }
1053
1054 std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
1055                                                          const WorkloadInfo& Info) const
1056 {
1057     return std::unique_ptr<IWorkload>();
1058 }
1059
1060 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
1061                                                                     const WorkloadInfo& info) const
1062 {
1063     return std::unique_ptr<IWorkload>();
1064 }
1065
1066 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
1067                                                          const WorkloadInfo& info) const
1068 {
1069     return std::unique_ptr<IWorkload>();
1070 }
1071
1072 std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
1073                                                                   const WorkloadInfo& info) const
1074 {
1075     return std::unique_ptr<IWorkload>();
1076 }
1077
1078 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
1079                                                           const WorkloadInfo& info) const
1080 {
1081     return std::unique_ptr<IWorkload>();
1082 }
1083
1084 std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
1085                                                            const WorkloadInfo& info) const
1086 {
1087     return std::unique_ptr<IWorkload>();
1088 }
1089
1090 std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
1091                                                                    const WorkloadInfo& info) const
1092 {
1093     return std::unique_ptr<IWorkload>();
1094 }
1095
1096 std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
1097                                                         const WorkloadInfo& info) const
1098 {
1099     return std::unique_ptr<IWorkload>();
1100 }
1101
1102 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
1103                                                            const WorkloadInfo& info) const
1104 {
1105     return std::unique_ptr<IWorkload>();
1106 }
1107
1108 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
1109                                                         const WorkloadInfo& Info) const
1110 {
1111     return std::unique_ptr<IWorkload>();
1112 }
1113
1114 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
1115                                                            const WorkloadInfo& info) const
1116 {
1117     return std::unique_ptr<IWorkload>();
1118 }
1119
1120 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
1121                                                              const WorkloadInfo& info) const
1122 {
1123     return std::unique_ptr<IWorkload>();
1124 }
1125
1126 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& descriptor,
1127                                                          const WorkloadInfo& info) const
1128 {
1129     return std::unique_ptr<IWorkload>();
1130 }
1131
1132 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
1133                                                           const WorkloadInfo& info) const
1134 {
1135     return std::unique_ptr<IWorkload>();
1136 }
1137
1138 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
1139                                                            const WorkloadInfo& info) const
1140 {
1141     return std::unique_ptr<IWorkload>();
1142 }
1143
1144 std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
1145                                                                   const WorkloadInfo& info) const
1146 {
1147     return std::unique_ptr<IWorkload>();
1148 }
1149
1150 std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
1151                                                                  const WorkloadInfo& info) const
1152 {
1153     return std::unique_ptr<IWorkload>();
1154 }
1155
1156 std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
1157                                                           const WorkloadInfo& info) const
1158 {
1159     return std::unique_ptr<IWorkload>();
1160 }
1161
1162 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
1163                                                        const WorkloadInfo& Info) const
1164 {
1165     return std::unique_ptr<IWorkload>();
1166 }
1167
1168 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
1169                                                            const WorkloadInfo& info) const
1170 {
1171     return std::unique_ptr<IWorkload>();
1172 }
1173
1174 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
1175                                                              const WorkloadInfo& info) const
1176 {
1177     return std::unique_ptr<IWorkload>();
1178 }
1179
1180 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
1181                                                                const WorkloadInfo& info) const
1182 {
1183     return std::unique_ptr<IWorkload>();
1184 }
1185
1186 std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &descriptor,
1187                                                          const WorkloadInfo &info) const
1188 {
1189     return std::unique_ptr<IWorkload>();
1190 }
1191
1192 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
1193                                                             const WorkloadInfo& Info) const
1194 {
1195     return std::unique_ptr<IWorkload>();
1196 }
1197
1198 std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
1199                                                                  const WorkloadInfo& info) const
1200 {
1201     return std::unique_ptr<IWorkload>();
1202 }
1203
1204 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
1205                                                            const WorkloadInfo& info) const
1206 {
1207     return std::unique_ptr<IWorkload>();
1208 }
1209
1210 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
1211                                                                   const WorkloadInfo& info) const
1212 {
1213     return std::unique_ptr<IWorkload>();
1214 }
1215
1216 std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
1217                                                             const WorkloadInfo& info) const
1218 {
1219     return std::unique_ptr<IWorkload>();
1220 }
1221
1222 std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
1223                                                          const WorkloadInfo& info) const
1224 {
1225     return std::unique_ptr<IWorkload>();
1226 }
1227
1228 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
1229                                                            const WorkloadInfo& info) const
1230 {
1231     return std::unique_ptr<IWorkload>();
1232 }
1233
1234 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
1235                                                             const WorkloadInfo& info) const
1236 {
1237     return std::unique_ptr<IWorkload>();
1238 }
1239
1240 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
1241                                                                   const WorkloadInfo& info) const
1242 {
1243     return std::unique_ptr<IWorkload>();
1244 }
1245
1246 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
1247                                                                 const WorkloadInfo& info) const
1248 {
1249     return std::unique_ptr<IWorkload>();
1250 }
1251
1252 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
1253                                                          const WorkloadInfo& info) const
1254 {
1255     return std::unique_ptr<IWorkload>();
1256 }
1257
1258 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
1259                                                                 const WorkloadInfo& Info) const
1260 {
1261     return std::unique_ptr<IWorkload>();
1262 }
1263
1264 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
1265                                                                const WorkloadInfo& info) const
1266 {
1267     return std::unique_ptr<IWorkload>();
1268 }
1269
1270 std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& descriptor,
1271                                                           const WorkloadInfo& info) const
1272 {
1273     return std::unique_ptr<IWorkload>();
1274 }
1275
1276 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
1277     const TransposeConvolution2dQueueDescriptor& descriptor,
1278     const WorkloadInfo& info) const
1279 {
1280     return std::unique_ptr<IWorkload>();
1281 }
1282
} // namespace armnn