2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
6 #include "CpuTensorHandle.hpp"
9 #include <LayersFwd.hpp>
11 #include <armnn/Types.hpp>
12 #include <armnn/LayerSupport.hpp>
13 #include <armnn/ILayerSupport.hpp>
15 #include <backendsCommon/BackendRegistry.hpp>
16 #include <backendsCommon/WorkloadFactory.hpp>
17 #include <backendsCommon/IBackendInternal.hpp>
19 #include <boost/cast.hpp>
20 #include <boost/iterator/transform_iterator.hpp>
31 const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
38 return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
41 Optional<DataType> GetBiasTypeFromWeightsType(Optional<DataType> weightsType)
48 switch(weightsType.value())
50 case DataType::Float16:
51 case DataType::Float32:
53 case DataType::QuantisedAsymm8:
54 return DataType::Signed32;
56 BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
58 return EmptyOptional();
61 } // anonymous namespace
// Queries the given backend's ILayerSupport object to determine whether
// 'connectableLayer' can execute on 'backendId'. When 'dataType' is set,
// every tensor info passed to the support query has its data type overridden
// (shape/quantisation preserved) - used e.g. to probe FP16 support.
// On an unsupported result a human-readable explanation is written to
// 'outReasonIfUnsupported'.
bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                        const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
    // Wrap the caller's out-string so the Is*Supported calls can fill it in directly.
    Optional<std::string&> reason = outReasonIfUnsupported;
    // Downcast is safe: every IConnectableLayer in the graph derives from Layer.
    const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));

    auto const& backendRegistry = BackendRegistryInstance();
    // An unregistered backend can never support a layer - report why and bail out.
    if (!backendRegistry.IsBackendRegistered(backendId))
        ss << connectableLayer.GetName() << " is not supported on " << backendId
           << " because this backend is not registered.";
        outReasonIfUnsupported = ss.str();

    // Instantiate the backend only to obtain its layer-support object.
    auto backendFactory = backendRegistry.GetFactory(backendId);
    auto backendObject = backendFactory();
    auto layerSupportObject = backendObject->GetLayerSupport();

    // Dispatch on the layer type: each case gathers the tensor infos (and any
    // layer parameters/weights) its Is<Layer>Supported query needs.
    switch(layer.GetType())
        case LayerType::Activation:
            auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsActivationSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                cLayer->GetParameters(),
        case LayerType::Addition:
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsAdditionSupported(
                OverrideDataType(input0, dataType),
                OverrideDataType(input1, dataType),
                OverrideDataType(output, dataType),
        case LayerType::BatchNormalization:
            auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            // Per-channel statistics and scale/shift tensors held by the layer itself.
            const TensorInfo& mean = cLayer->m_Mean->GetTensorInfo();
            const TensorInfo& var = cLayer->m_Variance->GetTensorInfo();
            const TensorInfo& beta = cLayer->m_Beta->GetTensorInfo();
            const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
            result = layerSupportObject->IsBatchNormalizationSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                OverrideDataType(mean, dataType),
                OverrideDataType(var, dataType),
                OverrideDataType(beta, dataType),
                OverrideDataType(gamma, dataType),
                cLayer->GetParameters(),
        case LayerType::BatchToSpaceNd:
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            auto cLayer = boost::polymorphic_downcast<const BatchToSpaceNdLayer*>(&layer);
            result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
        case LayerType::Constant:
            // Constant layers have no inputs; only the produced tensor matters.
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
        case LayerType::ConvertFp16ToFp32:
            // Conversion layers keep their fixed fp16/fp32 types - no data-type override.
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);
        case LayerType::ConvertFp32ToFp16:
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
        case LayerType::Convolution2d:
            auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);
            const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
            const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

            const Convolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
                // Bias type is derived from the weights type (e.g. Signed32 for quantised weights).
                OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));

            result = layerSupportObject->IsConvolution2dSupported(
                OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
        case LayerType::MemCopy:
            // MemCopy supported for CpuRef, CpuAcc and GpuAcc backends,
            // (also treat Undefined as CpuRef to avoid breaking lots of Unit tests).
            result = backendId == Compute::CpuRef || backendId == Compute::Undefined
                     || backendId == Compute::CpuAcc || backendId == Compute::GpuAcc;
            // Reason is pre-filled unconditionally; it is only meaningful when result is false.
            reason.value() = "Unsupported backend type";
        case LayerType::DepthwiseConvolution2d:
            auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer);
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

            const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();

            // Construct optional biases object based on the value of m_BiasEnabled
            Optional<TensorInfo> biases;
            if (descriptor.m_BiasEnabled)
                OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));

            result = layerSupportObject->IsDepthwiseConvolutionSupported(
                OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
        case LayerType::FakeQuantization:
            auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
                                                                     cLayer->GetParameters(),
        case LayerType::Floor:
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
        case LayerType::FullyConnected:
            auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

            // Dummy bias infos of each possible bias type, used when the layer
            // has biases disabled (the support query still expects a bias info).
            const TensorInfo * biasInfoPtr = nullptr;
            static const TensorInfo dummyFloat16Bias(TensorShape({1,1,1,1}), DataType::Float16);
            static const TensorInfo dummyFloat32Bias(TensorShape({1,1,1,1}), DataType::Float32);
            static const TensorInfo dummyQA8Bias(TensorShape({1,1,1,1}), DataType::Signed32);

            const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
            if (descriptor.m_BiasEnabled)
                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
                biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
                biasInfoPtr = &biasInfo;

            // If biases are not enabled pass a dummy tensorinfo for the validation
            // (the dummy's type is chosen to match the input's data type).
            switch(input.GetDataType())
                case DataType::Float16:
                    biasInfoPtr = &dummyFloat16Bias;
                case DataType::Float32:
                    biasInfoPtr = &dummyFloat32Bias;
                case DataType::QuantisedAsymm8:
                    biasInfoPtr = &dummyQA8Bias;
                    BOOST_ASSERT_MSG(false, "Unexpected bias type");

            result = layerSupportObject->IsFullyConnectedSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
        case LayerType::Input:
            // An Input layer's tensor lives on its output slot.
            const TensorInfo& input = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);
        case LayerType::L2Normalization:
            auto cLayer = boost::polymorphic_downcast<const L2NormalizationLayer*>(&layer);
            const L2NormalizationDescriptor& descriptor = cLayer->GetParameters();

            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsL2NormalizationSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
        case LayerType::Lstm:
            auto cLayer = boost::polymorphic_downcast<const LstmLayer*>(&layer);
            const LstmDescriptor& descriptor = cLayer->GetParameters();

            // All inputs.
            const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
            const TensorInfo& outputStateIn = OverrideDataType(layer.GetInputSlot(1).GetConnection()->GetTensorInfo(),
            const TensorInfo& cellStateIn = OverrideDataType(layer.GetInputSlot(2).GetConnection()->GetTensorInfo(),
            // All outputs.
            const TensorInfo& scratchBuffer = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
            const TensorInfo& outputStateOut = OverrideDataType(layer.GetOutputSlot(1).GetTensorInfo(), dataType);
            const TensorInfo& cellStateOut = OverrideDataType(layer.GetOutputSlot(2).GetTensorInfo(), dataType);
            const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(3).GetTensorInfo(), dataType);

            // Mandatory (basic) parameters - always present on an LSTM layer.
            const TensorInfo& inputToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& inputToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToForgetWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToCellWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
            const TensorInfo& recurrentToOutputWeights
                = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
            const TensorInfo& forgetGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
            const TensorInfo& cellBias
                = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
            const TensorInfo& outputGateBias
                = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

            // Optional parameters
            // Null pointer signals "parameter absent" to the support query; the
            // 'opt*' locals keep the overridden infos alive for the call below.
            const TensorInfo* inputToInputWeights = nullptr;
            const TensorInfo* recurrentToInputWeights = nullptr;
            const TensorInfo* cellToInputWeights = nullptr;
            const TensorInfo* inputGateBias = nullptr;
            const TensorInfo* projectionWeights = nullptr;
            const TensorInfo* projectionBias = nullptr;
            const TensorInfo* cellToForgetWeights = nullptr;
            const TensorInfo* cellToOutputWeights = nullptr;

            TensorInfo optInputToInputWeights;
            TensorInfo optRecurrentToInputWeights;
            TensorInfo optCellToInputWeights;
            TensorInfo optInputGateBias;
            TensorInfo optProjectionWeights;
            TensorInfo optProjectionBias;
            TensorInfo optCellToForgetWeights;
            TensorInfo optCellToOutputWeights;

            // CIFG disabled -> the input gate's own weights/bias are required.
            if(!descriptor.m_CifgEnabled)
                optInputToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
                inputToInputWeights = &optInputToInputWeights;

                optRecurrentToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
                recurrentToInputWeights = &optRecurrentToInputWeights;
                if (cLayer->m_CifgParameters.m_CellToInputWeights != nullptr)
                    optCellToInputWeights =
                        OverrideDataType(cLayer->m_CifgParameters.m_CellToInputWeights->GetTensorInfo(), dataType);
                    cellToInputWeights = &optCellToInputWeights;
                    OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
                inputGateBias = &optInputGateBias;

            // Projection layer parameters (bias itself is optional even then).
            if(descriptor.m_ProjectionEnabled)
                optProjectionWeights =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
                projectionWeights = &optProjectionWeights;
                if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                    projectionBias = &optProjectionBias;

            // Peephole connections from the cell state to the gates.
            if(descriptor.m_PeepholeEnabled)
                optCellToForgetWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
                cellToForgetWeights = &optCellToForgetWeights;
                optCellToOutputWeights =
                    OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
                cellToOutputWeights = &optCellToOutputWeights;

            result = layerSupportObject->IsLstmSupported(
                inputToForgetWeights,
                inputToOutputWeights,
                recurrentToForgetWeights,
                recurrentToCellWeights,
                recurrentToOutputWeights,
                recurrentToInputWeights,
        case LayerType::Merger:
            auto cLayer = boost::polymorphic_downcast<const MergerLayer*>(&layer);

            // Get vector of all inputs.
            auto getTensorInfo = [&dataType](const InputSlot& slot)
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
            std::vector<TensorInfo> inputs(beginI, endI);

            // The support query takes pointers; build them over the owned copies above.
            auto getTensorInfoPtr = [](const TensorInfo& info)
            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();

            result = layerSupportObject->IsMergerSupported(inputPtrs, output, cLayer->GetParameters(), reason);
        case LayerType::Multiplication:
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMultiplicationSupported(
                OverrideDataType(input0, dataType),
                OverrideDataType(input1, dataType),
                OverrideDataType(output, dataType),
        case LayerType::Normalization:
            auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
                                                                  OverrideDataType(output, dataType),
                                                                  cLayer->GetParameters(),
        case LayerType::Output:
            // An Output layer's tensor is the one arriving on its input slot.
            const TensorInfo& output = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);
        case LayerType::Permute:
            auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
            // (Pad layer)
            auto cLayer = boost::polymorphic_downcast<const PadLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPadSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                cLayer->GetParameters(),
        case LayerType::Pooling2d:
            auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),
        case LayerType::Division:
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsDivisionSupported(
                OverrideDataType(input0, dataType),
                OverrideDataType(input1, dataType),
                OverrideDataType(output, dataType),
        case LayerType::Reshape:
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType), reason);
        case LayerType::ResizeBilinear:
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsResizeBilinearSupported(OverrideDataType(input, dataType), reason);
        case LayerType::Softmax:
            auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
                                                            OverrideDataType(output, dataType),
                                                            cLayer->GetParameters(),
        case LayerType::SpaceToBatchNd:
            auto cLayer = boost::polymorphic_downcast<const SpaceToBatchNdLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
                                                                   OverrideDataType(output, dataType),
                                                                   cLayer->GetParameters(),
        case LayerType::Splitter:
            auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
                                                             cLayer->GetParameters(),
        case LayerType::StridedSlice:
            auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
        case LayerType::Subtraction:
            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsSubtractionSupported(
                OverrideDataType(input0, dataType),
                OverrideDataType(input1, dataType),
                OverrideDataType(output, dataType),
        case LayerType::Mean:
            auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
            result = layerSupportObject->IsMeanSupported(
                OverrideDataType(input, dataType),
                OverrideDataType(output, dataType),
                cLayer->GetParameters(),
            // Unknown layer type: assert in debug builds, report in release.
            BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
            reason.value() = "Unrecognised layer type";
// Convenience overload: resolves the backend from the layer's own assigned
// BackendId and forwards to the BackendId-explicit overload above.
bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
    // Downcast is safe: all IConnectableLayers in the graph derive from Layer.
    auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
    return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);