#include <boost/cast.hpp>
#include <boost/iterator/transform_iterator.hpp>

const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
    if (!type) { return info; }

    return TensorInfo(info.GetShape(), type.value(), info.GetQuantizationScale(), info.GetQuantizationOffset());
}
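// IsLayerSupported below passes every input/output/weight TensorInfo through
// OverrideDataType before handing it to the backend's ILayerSupport query, so the
// same graph can be checked against a different data type (for example Float16)
// than the one the tensors currently carry.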
bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                        const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
    Optional<std::string&> reason = outReasonIfUnsupported;
    bool result;
    const Layer& layer = *(boost::polymorphic_downcast<const Layer*>(&connectableLayer));

    auto const& backendRegistry = BackendRegistryInstance();
    if (!backendRegistry.IsBackendRegistered(backendId))
    {
        std::stringstream ss;
        ss << connectableLayer.GetName() << " is not supported on " << backendId
           << " because this backend is not registered.";

        outReasonIfUnsupported = ss.str();
        return false;
    }

    auto backendFactory = backendRegistry.GetFactory(backendId);
    auto backendObject = backendFactory();
    auto layerSupportObject = backendObject->GetLayerSupport();
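    // What follows is a switch on layer.GetType(): each case downcasts to the
    // concrete layer class, gathers the relevant TensorInfos (with the data type
    // override applied), and forwards to the matching ILayerSupport::Is*Supported
    // query, writing any failure reason into 'reason'.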
        auto cLayer = boost::polymorphic_downcast<const ActivationLayer*>(&layer);
        result = layerSupportObject->IsActivationSupported(
            OverrideDataType(input, dataType),
            OverrideDataType(output, dataType),
            cLayer->GetParameters(),

        result = layerSupportObject->IsAdditionSupported(
            OverrideDataType(input0, dataType),
            OverrideDataType(input1, dataType),
            OverrideDataType(output, dataType),

        auto cLayer = boost::polymorphic_downcast<const ArgMinMaxLayer*>(&layer);
        result = layerSupportObject->IsArgMinMaxSupported(
            OverrideDataType(input, dataType),
        auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
        const TensorInfo& mean  = cLayer->m_Mean->GetTensorInfo();
        const TensorInfo& var   = cLayer->m_Variance->GetTensorInfo();
        const TensorInfo& beta  = cLayer->m_Beta->GetTensorInfo();
        const TensorInfo& gamma = cLayer->m_Gamma->GetTensorInfo();
        result = layerSupportObject->IsBatchNormalizationSupported(
            OverrideDataType(input, dataType),
            OverrideDataType(output, dataType),
            OverrideDataType(mean, dataType),
            OverrideDataType(var, dataType),
            OverrideDataType(beta, dataType),
            OverrideDataType(gamma, dataType),
            cLayer->GetParameters(),

        auto cLayer = boost::polymorphic_downcast<const BatchToSpaceNdLayer*>(&layer);
        result = layerSupportObject->IsBatchToSpaceNdSupported(OverrideDataType(input, dataType),
                                                               OverrideDataType(output, dataType),
                                                               cLayer->GetParameters(),
        auto cLayer = boost::polymorphic_downcast<const ComparisonLayer*>(&layer);
        result = layerSupportObject->IsComparisonSupported(OverrideDataType(input0, dataType),
                                                           OverrideDataType(input1, dataType),
                                                           cLayer->GetParameters(),

        result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);

        result = layerSupportObject->IsConvertFp16ToFp32Supported(input, output, reason);

        result = layerSupportObject->IsConvertFp32ToFp16Supported(input, output, reason);
        auto cLayer = boost::polymorphic_downcast<const Convolution2dLayer*>(&layer);
        BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

        if (descriptor.m_BiasEnabled)

        result = layerSupportObject->IsConvolution2dSupported(
            OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),

        result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
                                                      OverrideDataType(output, dataType),
        auto cLayer = boost::polymorphic_downcast<const DepthToSpaceLayer*>(&layer);
        result = layerSupportObject->IsDepthToSpaceSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),

        auto cLayer = boost::polymorphic_downcast<const DepthwiseConvolution2dLayer*>(&layer);
        BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

        if (descriptor.m_BiasEnabled)

        result = layerSupportObject->IsDepthwiseConvolutionSupported(
            OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
        result = layerSupportObject->IsDequantizeSupported(input,
                                                           OverrideDataType(output, dataType),

        auto cLayer = boost::polymorphic_downcast<const DetectionPostProcessLayer*>(&layer);
        result = layerSupportObject->IsDetectionPostProcessSupported(boxEncodings,

        auto cLayer = boost::polymorphic_downcast<const ElementwiseUnaryLayer*>(&layer);
        result = layerSupportObject->IsElementwiseUnarySupported(OverrideDataType(input, dataType),
                                                                 OverrideDataType(output, dataType),
                                                                 cLayer->GetParameters(),
        auto cLayer = boost::polymorphic_downcast<const FakeQuantizationLayer*>(&layer);
        result = layerSupportObject->IsFakeQuantizationSupported(OverrideDataType(input, dataType),
                                                                 cLayer->GetParameters(),

        result = layerSupportObject->IsFloorSupported(OverrideDataType(input, dataType),
                                                      OverrideDataType(output, dataType),

        auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
        BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);

        BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
        biasInfoPtr = &biasInfo;
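        // When the layer carries no bias, the switch below picks a dummy bias
        // TensorInfo whose type matches the (possibly overridden) input data type,
        // so the backend query still receives a plausible bias type.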
        switch (input.GetDataType())

            biasInfoPtr = &dummyFloat16Bias;

            biasInfoPtr = &dummyFloat32Bias;

            biasInfoPtr = &dummyQA8Bias;

            BOOST_ASSERT_MSG(false, "Unexpected bias type");

        result = layerSupportObject->IsFullyConnectedSupported(
            OverrideDataType(input, dataType),
            OverrideDataType(output, dataType),
            OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType),
        result = layerSupportObject->IsGatherSupported(OverrideDataType(input0, dataType),
                                                       OverrideDataType(output, dataType),

        result = layerSupportObject->IsInputSupported(OverrideDataType(input, dataType), reason);

        auto cLayer = boost::polymorphic_downcast<const InstanceNormalizationLayer*>(&layer);
        result = layerSupportObject->IsInstanceNormalizationSupported(
            OverrideDataType(input, dataType),
            OverrideDataType(output, dataType),

        auto cLayer = boost::polymorphic_downcast<const L2NormalizationLayer*>(&layer);
        result = layerSupportObject->IsL2NormalizationSupported(
            OverrideDataType(input, dataType),
            OverrideDataType(output, dataType),

        auto cLayer = boost::polymorphic_downcast<const LogSoftmaxLayer*>(&layer);
        result = layerSupportObject->IsLogSoftmaxSupported(OverrideDataType(input, dataType),
                                                           OverrideDataType(output, dataType),
                                                           cLayer->GetParameters(),
        auto cLayer = boost::polymorphic_downcast<const LstmLayer*>(&layer);

        const TensorInfo& inputToForgetWeights
            = OverrideDataType(cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo(), dataType);
        const TensorInfo& inputToCellWeights
            = OverrideDataType(cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo(), dataType);
        const TensorInfo& inputToOutputWeights
            = OverrideDataType(cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo(), dataType);
        const TensorInfo& recurrentToForgetWeights
            = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToForgetWeights->GetTensorInfo(), dataType);
        const TensorInfo& recurrentToCellWeights
            = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToCellWeights->GetTensorInfo(), dataType);
        const TensorInfo& recurrentToOutputWeights
            = OverrideDataType(cLayer->m_BasicParameters.m_RecurrentToOutputWeights->GetTensorInfo(), dataType);
        const TensorInfo& forgetGateBias
            = OverrideDataType(cLayer->m_BasicParameters.m_ForgetGateBias->GetTensorInfo(), dataType);
        const TensorInfo& cellBias
            = OverrideDataType(cLayer->m_BasicParameters.m_CellBias->GetTensorInfo(), dataType);
        const TensorInfo& outputGateBias
            = OverrideDataType(cLayer->m_BasicParameters.m_OutputGateBias->GetTensorInfo(), dataType);

        paramsInfo.m_InputToCellWeights       = &inputToCellWeights;
        paramsInfo.m_InputToOutputWeights     = &inputToOutputWeights;
        paramsInfo.m_RecurrentToForgetWeights = &recurrentToForgetWeights;
        paramsInfo.m_RecurrentToCellWeights   = &recurrentToCellWeights;
        paramsInfo.m_RecurrentToOutputWeights = &recurrentToOutputWeights;
        paramsInfo.m_ForgetGateBias           = &forgetGateBias;
        paramsInfo.m_CellBias                 = &cellBias;
        paramsInfo.m_OutputGateBias           = &outputGateBias;
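        // The optional LSTM weight/bias groups below (CIFG, projection, peephole,
        // layer normalisation) are only gathered and attached to paramsInfo when the
        // corresponding flag in the LstmDescriptor is enabled.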
        if (!descriptor.m_CifgEnabled)
        {
            optInputToInputWeights =
                OverrideDataType(cLayer->m_CifgParameters.m_InputToInputWeights->GetTensorInfo(), dataType);
            paramsInfo.m_InputToInputWeights = &optInputToInputWeights;

            optRecurrentToInputWeights =
                OverrideDataType(cLayer->m_CifgParameters.m_RecurrentToInputWeights->GetTensorInfo(), dataType);
            paramsInfo.m_RecurrentToInputWeights = &optRecurrentToInputWeights;

            if (cLayer->m_CifgParameters.m_CellToInputWeights != nullptr)
            {
                optCellToInputWeights =
                    OverrideDataType(cLayer->m_CifgParameters.m_CellToInputWeights->GetTensorInfo(), dataType);
                paramsInfo.m_CellToInputWeights = &optCellToInputWeights;
            }

            optInputGateBias =
                OverrideDataType(cLayer->m_CifgParameters.m_InputGateBias->GetTensorInfo(), dataType);
            paramsInfo.m_InputGateBias = &optInputGateBias;
        }
        if (descriptor.m_ProjectionEnabled)
        {
            optProjectionWeights =
                OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionWeights->GetTensorInfo(), dataType);
            paramsInfo.m_ProjectionWeights = &optProjectionWeights;

            if (cLayer->m_ProjectionParameters.m_ProjectionBias != nullptr)
            {
                optProjectionBias =
                    OverrideDataType(cLayer->m_ProjectionParameters.m_ProjectionBias->GetTensorInfo(), dataType);
                paramsInfo.m_ProjectionBias = &optProjectionBias;
            }
        }
        if (descriptor.m_PeepholeEnabled)
        {
            optCellToForgetWeights =
                OverrideDataType(cLayer->m_PeepholeParameters.m_CellToForgetWeights->GetTensorInfo(), dataType);
            paramsInfo.m_CellToForgetWeights = &optCellToForgetWeights;

            optCellToOutputWeights =
                OverrideDataType(cLayer->m_PeepholeParameters.m_CellToOutputWeights->GetTensorInfo(), dataType);
            paramsInfo.m_CellToOutputWeights = &optCellToOutputWeights;
        }
        if (descriptor.m_LayerNormEnabled)
        {
            if (!descriptor.m_CifgEnabled)
            {
                optInputLayerNormWeights = OverrideDataType(
                    cLayer->m_LayerNormParameters.m_InputLayerNormWeights->GetTensorInfo(), dataType);
                paramsInfo.m_InputLayerNormWeights = &optInputLayerNormWeights;
            }

            optForgetLayerNormWeights = OverrideDataType(
                cLayer->m_LayerNormParameters.m_ForgetLayerNormWeights->GetTensorInfo(), dataType);
            paramsInfo.m_ForgetLayerNormWeights = &optForgetLayerNormWeights;

            optCellLayerNormWeights = OverrideDataType(
                cLayer->m_LayerNormParameters.m_CellLayerNormWeights->GetTensorInfo(), dataType);
            paramsInfo.m_CellLayerNormWeights = &optCellLayerNormWeights;

            optOutputLayerNormWeights = OverrideDataType(
                cLayer->m_LayerNormParameters.m_OutputLayerNormWeights->GetTensorInfo(), dataType);
            paramsInfo.m_OutputLayerNormWeights = &optOutputLayerNormWeights;
        }
        result = layerSupportObject->IsLstmSupported(

        result = layerSupportObject->IsMaximumSupported(OverrideDataType(input0, dataType),
                                                        OverrideDataType(input1, dataType),
                                                        OverrideDataType(output, dataType),

        result = layerSupportObject->IsMemCopySupported(OverrideDataType(input, dataType),
                                                        OverrideDataType(output, dataType),

        result = layerSupportObject->IsMemImportSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),

        result = layerSupportObject->IsMergeSupported(OverrideDataType(input0, dataType),
                                                      OverrideDataType(input1, dataType),
                                                      OverrideDataType(output, dataType),
        auto cLayer = boost::polymorphic_downcast<const ConcatLayer*>(&layer);

        auto getTensorInfo = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
        auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
        auto endI   = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
        std::vector<TensorInfo> inputs(beginI, endI);

        auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
        auto endPtr   = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
        std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
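        // Pattern shared by the Concat, Stack and StandIn cases: a transform_iterator
        // turns the layer's slots into TensorInfo values (with the data type override
        // applied), and a second pass (via a getTensorInfoPtr lambda, elided here)
        // turns those values into the vector of const TensorInfo* that the
        // ILayerSupport query expects.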
        result = layerSupportObject->IsConcatSupported(inputPtrs, output, cLayer->GetParameters(), reason);

        result = layerSupportObject->IsMultiplicationSupported(
            OverrideDataType(input0, dataType),
            OverrideDataType(input1, dataType),
            OverrideDataType(output, dataType),

        auto cLayer = boost::polymorphic_downcast<const NormalizationLayer*>(&layer);
        result = layerSupportObject->IsNormalizationSupported(OverrideDataType(input, dataType),
                                                              OverrideDataType(output, dataType),
                                                              cLayer->GetParameters(),

        result = layerSupportObject->IsOutputSupported(OverrideDataType(output, dataType), reason);

        auto cLayer = boost::polymorphic_downcast<const PermuteLayer*>(&layer);
        result = layerSupportObject->IsPermuteSupported(OverrideDataType(input, dataType),
                                                        OverrideDataType(output, dataType),
                                                        cLayer->GetParameters(),

        auto cLayer = boost::polymorphic_downcast<const PadLayer*>(&layer);
        result = layerSupportObject->IsPadSupported(
            OverrideDataType(input, dataType),
            OverrideDataType(output, dataType),
            cLayer->GetParameters(),
        auto cLayer = boost::polymorphic_downcast<const Pooling2dLayer*>(&layer);
        result = layerSupportObject->IsPooling2dSupported(OverrideDataType(input, dataType),
                                                          OverrideDataType(output, dataType),
                                                          cLayer->GetParameters(),

        auto cLayer = boost::polymorphic_downcast<const PreCompiledLayer*>(&layer);
        result = layerSupportObject->IsPreCompiledSupported(OverrideDataType(input, dataType),
                                                            cLayer->GetParameters(),

        result = layerSupportObject->IsQuantizeSupported(input, output, reason);

        auto cLayer = boost::polymorphic_downcast<const QuantizedLstmLayer*>(&layer);
        paramsInfo.m_InputToInputWeights      =
            &cLayer->m_QuantizedLstmParameters.m_InputToInputWeights->GetTensorInfo();
        paramsInfo.m_InputToForgetWeights     =
            &cLayer->m_QuantizedLstmParameters.m_InputToForgetWeights->GetTensorInfo();
        paramsInfo.m_InputToCellWeights       =
            &cLayer->m_QuantizedLstmParameters.m_InputToCellWeights->GetTensorInfo();
        paramsInfo.m_InputToOutputWeights     =
            &cLayer->m_QuantizedLstmParameters.m_InputToOutputWeights->GetTensorInfo();

        paramsInfo.m_RecurrentToInputWeights  =
            &cLayer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->GetTensorInfo();
        paramsInfo.m_RecurrentToForgetWeights =
            &cLayer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->GetTensorInfo();
        paramsInfo.m_RecurrentToCellWeights   =
            &cLayer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->GetTensorInfo();
        paramsInfo.m_RecurrentToOutputWeights =
            &cLayer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->GetTensorInfo();

        paramsInfo.m_InputGateBias            =
            &cLayer->m_QuantizedLstmParameters.m_InputGateBias->GetTensorInfo();
        paramsInfo.m_ForgetGateBias           =
            &cLayer->m_QuantizedLstmParameters.m_ForgetGateBias->GetTensorInfo();
        paramsInfo.m_CellBias                 =
            &cLayer->m_QuantizedLstmParameters.m_CellBias->GetTensorInfo();
        paramsInfo.m_OutputGateBias           =
            &cLayer->m_QuantizedLstmParameters.m_OutputGateBias->GetTensorInfo();
        result = layerSupportObject->IsQuantizedLstmSupported(input,

        result = layerSupportObject->IsDivisionSupported(
            OverrideDataType(input0, dataType),
            OverrideDataType(input1, dataType),
            OverrideDataType(output, dataType),

        auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
        result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
                                                        OverrideDataType(output, dataType),
                                                        cLayer->GetParameters(),

        auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer);
        result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType),
                                                       OverrideDataType(output, dataType),
                                                       cLayer->GetParameters(),

        auto cLayer = boost::polymorphic_downcast<const SliceLayer*>(&layer);
        result = layerSupportObject->IsSliceSupported(OverrideDataType(input, dataType),
                                                      OverrideDataType(output, dataType),
                                                      cLayer->GetParameters(),
        auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
        result = layerSupportObject->IsSoftmaxSupported(OverrideDataType(input, dataType),
                                                        OverrideDataType(output, dataType),
                                                        cLayer->GetParameters(),

        auto cLayer = boost::polymorphic_downcast<const SpaceToBatchNdLayer*>(&layer);
        result = layerSupportObject->IsSpaceToBatchNdSupported(OverrideDataType(input, dataType),
                                                               OverrideDataType(output, dataType),
                                                               cLayer->GetParameters(),

        auto cLayer = boost::polymorphic_downcast<const SpaceToDepthLayer*>(&layer);
        result = layerSupportObject->IsSpaceToDepthSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
        auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);

        auto getTensorInfo = [&dataType](const OutputSlot& slot)
            {
                return OverrideDataType(slot.GetTensorInfo(), dataType);
            };
        auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
        auto endI   = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
        std::vector<TensorInfo> outputs(beginI, endI);

        const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
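        // Note: the Splitter query is handed its outputs as a vector of
        // std::reference_wrapper<TensorInfo>, unlike the concat/stack queries above,
        // which receive raw const TensorInfo* pointers.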
        result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
                                                         cLayer->GetParameters(),

        auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);

        auto getTensorInfo = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
        auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
        auto endI   = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
        std::vector<TensorInfo> inputs(beginI, endI);

        auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
        auto endPtr   = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
        std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);

        result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
        auto cLayer = boost::polymorphic_downcast<const StandInLayer*>(&layer);

        auto getTensorInfoIn = [&dataType](const InputSlot& slot)
            {
                return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
            };
        auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
            {
                return OverrideDataType(slot.GetTensorInfo(), dataType);
            };
        auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
        auto endI   = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
        std::vector<TensorInfo> inputs(beginI, endI);

        auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
        auto endO   = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
        std::vector<TensorInfo> outputs(beginO, endO);

        auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
        auto endPtrI   = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
        std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);

        auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
        auto endPtrO   = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
        std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);

        result = layerSupportObject->IsStandInSupported(inputPtrs,
                                                        cLayer->GetParameters(),
        auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
        result = layerSupportObject->IsStridedSliceSupported(OverrideDataType(input, dataType),
                                                             OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),

        result = layerSupportObject->IsSubtractionSupported(
            OverrideDataType(input0, dataType),
            OverrideDataType(input1, dataType),
            OverrideDataType(output, dataType),

        result = layerSupportObject->IsSwitchSupported(OverrideDataType(input0, dataType),
                                                       OverrideDataType(input1, dataType),
                                                       OverrideDataType(output0, dataType),
                                                       OverrideDataType(output1, dataType),

        auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
        result = layerSupportObject->IsMeanSupported(
            OverrideDataType(input, dataType),
            OverrideDataType(output, dataType),
            cLayer->GetParameters(),

        result = layerSupportObject->IsMinimumSupported(OverrideDataType(input0, dataType),
                                                        OverrideDataType(input1, dataType),
                                                        OverrideDataType(output, dataType),

        result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
                                                      OverrideDataType(alpha, dataType),
                                                      OverrideDataType(output, dataType),
        auto cLayer = boost::polymorphic_downcast<const TransposeConvolution2dLayer*>(&layer);

        if (descriptor.m_BiasEnabled)
        {
            BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
            biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                      GetBiasTypeFromWeightsType(dataType));
        }

        BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
        const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);

        result = layerSupportObject->IsTransposeConvolution2dSupported(input,
        // default case: the layer type was not recognised
        BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
        reason.value() = "Unrecognised layer type";
    }

    return result;
}

bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLayer,
                                        Optional<DataType> dataType,
                                        std::string& outReasonIfUnsupported)
{
    auto layer = boost::polymorphic_downcast<const Layer*>(&connectableLayer);
    return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
}
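// Illustrative usage (not part of the original file): a caller holding an
// IConnectableLayer from a built network could query support on a named backend,
// here the reference backend, at an overridden data type:
//
//     std::string failureReason;
//     bool supported = IWorkloadFactory::IsLayerSupported(BackendId("CpuRef"),
//                                                         someLayer,          // hypothetical layer reference
//                                                         DataType::Float32,
//                                                         failureReason);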
// Default implementations of IWorkloadFactory's per-layer Create* functions
// (CreateActivation, CreateAddition, CreateBatchNormalization and so on, through
// CreateTransposeConvolution2d). Every one of them has the same one-line body:
//
//     return std::unique_ptr<IWorkload>();
//
// i.e. the base class creates no workload; backend-specific factories override the
// Create* functions for the layers they actually implement.
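// Sketch (assumption, not from this file): how a backend's factory would override
// one of these defaults. 'MyBackendWorkloadFactory' and 'MyAdditionWorkload' are
// hypothetical names used purely for illustration.
//
//     class MyBackendWorkloadFactory : public IWorkloadFactory
//     {
//     public:
//         std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
//                                                   const WorkloadInfo& info) const override
//         {
//             return std::make_unique<MyAdditionWorkload>(descriptor, info);
//         }
//         // ... remaining overrides and the other virtual members of IWorkloadFactory ...
//     };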