2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
5 #include <armnn/LayerSupport.hpp>
6 #include <armnn/Optional.hpp>
7 #include <armnn/ILayerSupport.hpp>
9 #include <backendsCommon/BackendRegistry.hpp>
10 #include <backendsCommon/IBackendInternal.hpp>
12 #include <boost/assert.hpp>
16 #include <unordered_map>
17 #include <armnn/ArmNN.hpp>
/// Helper function to copy a full string into a truncated, caller-owned buffer.
/// @param truncatedString destination buffer; may be nullptr (call becomes a no-op).
/// @param fullString      source string; may be nullptr (treated as empty).
/// @param maxLength       total size of the destination buffer, including the NUL.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if (truncatedString != nullptr && maxLength > 0)
    {
        // snprintf never writes more than maxLength bytes and always NUL-terminates.
        // Guard against a null source: passing nullptr to "%s" is undefined behaviour.
        std::snprintf(truncatedString, maxLength, "%s", fullString != nullptr ? fullString : "");
    }
}
31 } // anonymous namespace
// Helper macro to avoid code duplication.
// Forwards function func to the layer-support object of the backend identified by
// backendId. Expands to a statement sequence that RETURNS from the enclosing
// function; the enclosing function must provide `reasonIfUnsupported` and
// `reasonIfUnsupportedMaxLength` parameters for the error-message copy below.
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported = false; \
    try \
    { \
        auto const& backendRegistry = BackendRegistryInstance(); \
        if (!backendRegistry.IsBackendRegistered(backendId)) \
        { \
            std::stringstream ss; \
            ss << __func__ << " is not supported on " << backendId << " because this backend is not registered."; \
            reasonIfUnsupportedFull = ss.str(); \
            isSupported = false; \
        } \
        else \
        { \
            auto factoryFunc = backendRegistry.GetFactory(backendId); \
            auto backendObject = factoryFunc(); \
            auto layerSupportObject = backendObject->GetLayerSupport(); \
            isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
        } \
        /* Copy the reason unconditionally: the "backend not registered" message must also reach the caller's buffer. */ \
        CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
        return isSupported; \
    } \
    catch (const InvalidArgumentException& e) \
    { \
        /* re-throwing with more context information */ \
        throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    }
64 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
66 return input0.GetDataType() == input1.GetDataType();
69 bool IsActivationSupported(const BackendId& backend,
70 const TensorInfo& input,
71 const TensorInfo& output,
72 const ActivationDescriptor& descriptor,
73 char* reasonIfUnsupported,
74 size_t reasonIfUnsupportedMaxLength)
76 FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
79 bool IsAdditionSupported(const BackendId& backend,
80 const TensorInfo& input0,
81 const TensorInfo& input1,
82 const TensorInfo& output,
83 char* reasonIfUnsupported,
84 size_t reasonIfUnsupportedMaxLength)
86 if(!CheckTensorDataTypesEqual(input0, input1))
91 FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
94 bool IsBatchNormalizationSupported(const BackendId& backend,
95 const TensorInfo& input,
96 const TensorInfo& output,
97 const TensorInfo& mean,
98 const TensorInfo& var,
99 const TensorInfo& beta,
100 const TensorInfo& gamma,
101 const BatchNormalizationDescriptor& descriptor,
102 char* reasonIfUnsupported,
103 size_t reasonIfUnsupportedMaxLength)
105 FORWARD_LAYER_SUPPORT_FUNC(backend,
106 IsBatchNormalizationSupported,
116 bool IsBatchToSpaceNdSupported(const BackendId& backend,
117 const TensorInfo& input,
118 const TensorInfo& output,
119 const BatchToSpaceNdDescriptor& descriptor,
120 char* reasonIfUnsupported,
121 size_t reasonIfUnsupportedMaxLength)
123 FORWARD_LAYER_SUPPORT_FUNC(backend,
124 IsBatchToSpaceNdSupported,
130 bool IsConcatSupported(const BackendId& backend,
131 std::vector<const TensorInfo*> inputs,
132 const TensorInfo& output,
133 const OriginsDescriptor& descriptor,
134 char* reasonIfUnsupported,
135 size_t reasonIfUnsupportedMaxLength)
137 BOOST_ASSERT(inputs.size() > 0);
139 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
142 bool IsConstantSupported(const BackendId& backend,
143 const TensorInfo& output,
144 char* reasonIfUnsupported,
145 size_t reasonIfUnsupportedMaxLength)
147 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
150 bool IsConvertFp16ToFp32Supported(const BackendId& backend,
151 const TensorInfo& input,
152 const TensorInfo& output,
153 char* reasonIfUnsupported,
154 size_t reasonIfUnsupportedMaxLength)
156 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
159 bool IsConvertFp32ToFp16Supported(const BackendId& backend,
160 const TensorInfo& input,
161 const TensorInfo& output,
162 char* reasonIfUnsupported,
163 size_t reasonIfUnsupportedMaxLength)
165 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
168 bool IsConvolution2dSupported(const BackendId& backend,
169 const TensorInfo& input,
170 const TensorInfo& output,
171 const Convolution2dDescriptor& descriptor,
172 const TensorInfo& weights,
173 const Optional<TensorInfo>& biases,
174 char* reasonIfUnsupported,
175 size_t reasonIfUnsupportedMaxLength)
177 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
180 bool IsDebugSupported(const BackendId& backend,
181 const TensorInfo& input,
182 const TensorInfo& output,
183 char* reasonIfUnsupported,
184 size_t reasonIfUnsupportedMaxLength)
186 FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output);
189 bool IsDepthwiseConvolutionSupported(const BackendId& backend,
190 const TensorInfo& input,
191 const TensorInfo& output,
192 const DepthwiseConvolution2dDescriptor& descriptor,
193 const TensorInfo& weights,
194 const Optional<TensorInfo>& biases,
195 char* reasonIfUnsupported,
196 size_t reasonIfUnsupportedMaxLength)
198 if (descriptor.m_DilationX == 1 && descriptor.m_DilationY == 1)
200 // Pre 19.05 ArmNN did not have the dilation parameters.
201 // This version of IsDepthwiseConvolutionSupported is called for backwards-compatibility
202 FORWARD_LAYER_SUPPORT_FUNC(backend,
203 IsDepthwiseConvolutionSupported,
212 FORWARD_LAYER_SUPPORT_FUNC(backend,
213 IsDilatedDepthwiseConvolutionSupported,
222 bool IsDequantizeSupported(const BackendId& backend,
223 const TensorInfo& input,
224 const TensorInfo& output,
225 char* reasonIfUnsupported,
226 size_t reasonIfUnsupportedMaxLength)
228 FORWARD_LAYER_SUPPORT_FUNC(backend, IsDequantizeSupported, input, output);
// NOTE(review): declaration only — no definition for IsDetectionPostProcessSupported
// is provided in this translation unit; presumably it lives elsewhere or is
// intentionally unimplemented here. Verify against the rest of the project.
bool IsDetectionPostProcessSupported(const BackendId& backend,
                                     const TensorInfo& input0,
                                     const TensorInfo& input1,
                                     const DetectionPostProcessDescriptor& descriptor,
                                     char* reasonIfUnsupported,
                                     size_t reasonIfUnsupportedMaxLength);
238 bool IsDivisionSupported(const BackendId& backend,
239 const TensorInfo& input0,
240 const TensorInfo& input1,
241 const TensorInfo& output,
242 char* reasonIfUnsupported,
243 size_t reasonIfUnsupportedMaxLength)
245 FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
248 bool IsEqualSupported(const BackendId& backend,
249 const TensorInfo& input0,
250 const TensorInfo& input1,
251 const TensorInfo& output,
252 char* reasonIfUnsupported,
253 size_t reasonIfUnsupportedMaxLength)
255 FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output);
258 bool IsFakeQuantizationSupported(const BackendId& backend,
259 const TensorInfo& input,
260 const FakeQuantizationDescriptor& descriptor,
261 char* reasonIfUnsupported,
262 size_t reasonIfUnsupportedMaxLength)
264 FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
267 bool IsFloorSupported(const BackendId& backend,
268 const TensorInfo& input,
269 const TensorInfo& output,
270 char* reasonIfUnsupported,
271 size_t reasonIfUnsupportedMaxLength)
273 // By definition (that is, regardless of compute device), shapes and data type must match.
274 if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
279 FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
281 bool IsFullyConnectedSupported(const BackendId& backend,
282 const TensorInfo& input,
283 const TensorInfo& output,
284 const TensorInfo& weights,
285 const TensorInfo& biases,
286 const FullyConnectedDescriptor& descriptor,
287 char* reasonIfUnsupported,
288 size_t reasonIfUnsupportedMaxLength)
290 FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
293 bool IsGatherSupported(const BackendId& backend,
294 const TensorInfo& input0,
295 const TensorInfo& input1,
296 const TensorInfo& output,
297 char* reasonIfUnsupported,
298 size_t reasonIfUnsupportedMaxLength)
300 FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output);
303 bool IsGreaterSupported(const BackendId& backend,
304 const TensorInfo& input0,
305 const TensorInfo& input1,
306 const TensorInfo& output,
307 char* reasonIfUnsupported,
308 size_t reasonIfUnsupportedMaxLength)
310 FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
313 bool IsInputSupported(const BackendId& backend,
314 const TensorInfo& input,
315 char* reasonIfUnsupported,
316 size_t reasonIfUnsupportedMaxLength)
318 FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
322 bool IsL2NormalizationSupported(const BackendId& backend,
323 const TensorInfo& input,
324 const TensorInfo& output,
325 const L2NormalizationDescriptor& descriptor,
326 char* reasonIfUnsupported,
327 size_t reasonIfUnsupportedMaxLength)
329 FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
332 bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
333 const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
334 const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
335 const TensorInfo& output, const LstmDescriptor& descriptor,
336 const LstmInputParamsInfo& paramsInfo, char* reasonIfUnsupported,
337 size_t reasonIfUnsupportedMaxLength)
340 FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
341 scratchBuffer, outputStateOut, cellStateOut,
342 output, descriptor, paramsInfo);
345 bool IsMaximumSupported(const BackendId& backend,
346 const TensorInfo& input0,
347 const TensorInfo& input1,
348 const TensorInfo& output,
349 char* reasonIfUnsupported,
350 size_t reasonIfUnsupportedMaxLength)
352 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMaximumSupported, input0, input1, output);
355 bool IsMeanSupported(const BackendId& backend,
356 const TensorInfo& input,
357 const TensorInfo& output,
358 const MeanDescriptor& descriptor,
359 char* reasonIfUnsupported,
360 size_t reasonIfUnsupportedMaxLength)
362 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
365 bool IsMemCopySupported(const BackendId &backend,
366 const TensorInfo &input,
367 const TensorInfo &output,
368 char *reasonIfUnsupported,
369 size_t reasonIfUnsupportedMaxLength)
371 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output);
374 bool IsMemImportSupported(const BackendId &backend,
375 const TensorInfo &input,
376 const TensorInfo &output,
377 char *reasonIfUnsupported,
378 size_t reasonIfUnsupportedMaxLength)
380 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemImportSupported, input, output);
383 bool IsMergeSupported(const BackendId& backend,
384 const TensorInfo& input0,
385 const TensorInfo& input1,
386 const TensorInfo& output,
387 char* reasonIfUnsupported,
388 size_t reasonIfUnsupportedMaxLength)
390 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
393 ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
394 bool IsMergerSupported(const BackendId& backend,
395 std::vector<const TensorInfo*> inputs,
396 const TensorInfo& output,
397 const OriginsDescriptor& descriptor,
398 char* reasonIfUnsupported,
399 size_t reasonIfUnsupportedMaxLength)
401 BOOST_ASSERT(inputs.size() > 0);
403 ARMNN_NO_DEPRECATE_WARN_BEGIN
404 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
405 ARMNN_NO_DEPRECATE_WARN_END
408 bool IsMinimumSupported(const BackendId& backend,
409 const TensorInfo& input0,
410 const TensorInfo& input1,
411 const TensorInfo& output,
412 char* reasonIfUnsupported,
413 size_t reasonIfUnsupportedMaxLength)
415 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
418 bool IsMultiplicationSupported(const BackendId& backend,
419 const TensorInfo& input0,
420 const TensorInfo& input1,
421 const TensorInfo& output,
422 char* reasonIfUnsupported,
423 size_t reasonIfUnsupportedMaxLength)
425 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
428 bool IsNormalizationSupported(const BackendId& backend,
429 const TensorInfo& input,
430 const TensorInfo& output,
431 const NormalizationDescriptor& descriptor,
432 char* reasonIfUnsupported,
433 size_t reasonIfUnsupportedMaxLength)
435 FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
438 bool IsOutputSupported(const BackendId& backend,
439 const TensorInfo& output,
440 char* reasonIfUnsupported,
441 size_t reasonIfUnsupportedMaxLength)
443 FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
446 bool IsPadSupported(const BackendId& backend,
447 const TensorInfo& input,
448 const TensorInfo& output,
449 const PadDescriptor& descriptor,
450 char* reasonIfUnsupported,
451 size_t reasonIfUnsupportedMaxLength)
454 FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
457 bool IsQuantizeSupported(const BackendId& backend,
458 const TensorInfo& input,
459 const TensorInfo& output,
460 char* reasonIfUnsupported,
461 size_t reasonIfUnsupportedMaxLength)
463 FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizeSupported, input, output);
466 bool IsQuantizedLstmSupported(const BackendId& backend,
467 const TensorInfo& input,
468 const TensorInfo& previousCellStateIn,
469 const TensorInfo& previousOutputIn,
470 const TensorInfo& cellStateOut,
471 const TensorInfo& output,
472 const QuantizedLstmInputParamsInfo& paramsInfo,
473 char* reasonIfUnsupported,
474 size_t reasonIfUnsupportedMaxLength)
477 FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizedLstmSupported, input, previousCellStateIn, previousOutputIn,
478 cellStateOut, output, paramsInfo);
481 bool IsPermuteSupported(const BackendId& backend,
482 const TensorInfo& input,
483 const TensorInfo& output,
484 const PermuteDescriptor& descriptor,
485 char* reasonIfUnsupported,
486 size_t reasonIfUnsupportedMaxLength)
488 FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
491 bool IsPooling2dSupported(const BackendId& backend,
492 const TensorInfo& input,
493 const TensorInfo& output,
494 const Pooling2dDescriptor& descriptor,
495 char* reasonIfUnsupported,
496 size_t reasonIfUnsupportedMaxLength)
498 FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
501 bool IsPreluSupported(const BackendId& backend,
502 const TensorInfo& input,
503 const TensorInfo& alpha,
504 const TensorInfo& output,
505 char* reasonIfUnsupported,
506 size_t reasonIfUnsupportedMaxLength)
508 FORWARD_LAYER_SUPPORT_FUNC(backend, IsPreluSupported, input, alpha, output);
511 bool IsReshapeSupported(const BackendId& backend,
512 const TensorInfo& input,
513 const ReshapeDescriptor& descriptor,
514 char* reasonIfUnsupported,
515 size_t reasonIfUnsupportedMaxLength)
517 FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
520 bool IsResizeSupported(const BackendId& backend,
521 const TensorInfo& input,
522 const TensorInfo& output,
523 const ResizeDescriptor& descriptor,
524 char* reasonIfUnsupported,
525 size_t reasonIfUnsupportedMaxLength)
527 FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
530 ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
531 bool IsResizeBilinearSupported(const BackendId& backend,
532 const TensorInfo& input,
533 const TensorInfo& output,
534 char* reasonIfUnsupported,
535 size_t reasonIfUnsupportedMaxLength)
537 ResizeDescriptor descriptor;
538 descriptor.m_Method = ResizeMethod::Bilinear;
540 const TensorShape& outputShape = output.GetShape();
541 descriptor.m_TargetWidth = outputShape[3];
542 descriptor.m_TargetHeight = outputShape[2];
544 FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
547 bool IsRsqrtSupported(const BackendId& backend,
548 const TensorInfo& input,
549 const TensorInfo& output,
550 char* reasonIfUnsupported,
551 size_t reasonIfUnsupportedMaxLength)
553 FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
556 bool IsSoftmaxSupported(const BackendId& backend,
557 const TensorInfo& input,
558 const TensorInfo& output,
559 const SoftmaxDescriptor& descriptor,
560 char* reasonIfUnsupported,
561 size_t reasonIfUnsupportedMaxLength)
563 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
566 bool IsSpaceToBatchNdSupported(const BackendId& backend,
567 const TensorInfo& input,
568 const TensorInfo& output,
569 const SpaceToBatchNdDescriptor& descriptor,
570 char* reasonIfUnsupported,
571 size_t reasonIfUnsupportedMaxLength)
573 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
576 bool IsSpaceToDepthSupported(const BackendId& backend,
577 const TensorInfo& input,
578 const TensorInfo& output,
579 const SpaceToDepthDescriptor& descriptor,
580 char* reasonIfUnsupported,
581 size_t reasonIfUnsupportedMaxLength)
583 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToDepthSupported, input, output, descriptor);
586 ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
587 bool IsSplitterSupported(const BackendId& backend,
588 const TensorInfo& input,
589 const ViewsDescriptor& descriptor,
590 char* reasonIfUnsupported,
591 size_t reasonIfUnsupportedMaxLength)
593 ARMNN_NO_DEPRECATE_WARN_BEGIN
594 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
595 ARMNN_NO_DEPRECATE_WARN_END
598 bool IsSplitterSupported(const BackendId& backend,
599 const TensorInfo& input,
600 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
601 const ViewsDescriptor& descriptor,
602 char* reasonIfUnsupported,
603 size_t reasonIfUnsupportedMaxLength)
605 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, outputs, descriptor);
608 bool IsStridedSliceSupported(const BackendId& backend,
609 const TensorInfo& input,
610 const TensorInfo& output,
611 const StridedSliceDescriptor& descriptor,
612 char* reasonIfUnsupported,
613 size_t reasonIfUnsupportedMaxLength)
615 FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
618 bool IsSubtractionSupported(const BackendId& backend,
619 const TensorInfo& input0,
620 const TensorInfo& input1,
621 const TensorInfo& output,
622 char* reasonIfUnsupported,
623 size_t reasonIfUnsupportedMaxLength)
625 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
628 bool IsSwitchSupported(const BackendId& backend,
629 const TensorInfo& input0,
630 const TensorInfo& input1,
631 const TensorInfo& output0,
632 const TensorInfo& output1,
633 char* reasonIfUnsupported,
634 size_t reasonIfUnsupportedMaxLength)
636 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSwitchSupported, input0, input1, output0, output1);