2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
5 #include <armnn/LayerSupport.hpp>
6 #include <armnn/Optional.hpp>
7 #include <armnn/ILayerSupport.hpp>
9 #include <backendsCommon/BackendRegistry.hpp>
10 #include <backendsCommon/IBackendInternal.hpp>
12 #include <boost/assert.hpp>
16 #include <unordered_map>
17 #include <armnn/ArmNN.hpp>
/// Copies fullString into truncatedString, truncating so that at most
/// maxLength bytes (including the terminating NUL) are written.
/// A null destination pointer is ignored.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if (truncatedString == nullptr)
    {
        return;
    }
    std::snprintf(truncatedString, maxLength, "%s", fullString);
}
31 } // anonymous namespace
// Helper macro to avoid code duplication.
// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of backendId.
// Expects the caller's scope to provide `reasonIfUnsupported` (char*) and
// `reasonIfUnsupportedMaxLength` (size_t); expands to statements ending in a
// `return`, so it must be the tail of the enclosing function.
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported = false; \
    try \
    { \
        auto const& backendRegistry = BackendRegistryInstance(); \
        if (!backendRegistry.IsBackendRegistered(backendId)) \
        { \
            std::stringstream ss; \
            ss << __func__ << " is not supported on " << backendId << " because this backend is not registered."; \
            reasonIfUnsupportedFull = ss.str(); \
            isSupported = false; \
        } \
        else \
        { \
            auto factoryFunc = backendRegistry.GetFactory(backendId); \
            auto backendObject = factoryFunc(); \
            auto layerSupportObject = backendObject->GetLayerSupport(); \
            isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
        } \
        /* Copy the reason in both branches so "backend not registered" reaches the caller too. */ \
        CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
    } \
    catch (const InvalidArgumentException& e) \
    { \
        /* re-throwing with more context information */ \
        throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    } \
    return isSupported;
64 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
66 return input0.GetDataType() == input1.GetDataType();
69 bool IsActivationSupported(const BackendId& backend,
70 const TensorInfo& input,
71 const TensorInfo& output,
72 const ActivationDescriptor& descriptor,
73 char* reasonIfUnsupported,
74 size_t reasonIfUnsupportedMaxLength)
76 FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
79 bool IsAdditionSupported(const BackendId& backend,
80 const TensorInfo& input0,
81 const TensorInfo& input1,
82 const TensorInfo& output,
83 char* reasonIfUnsupported,
84 size_t reasonIfUnsupportedMaxLength)
86 if(!CheckTensorDataTypesEqual(input0, input1))
91 FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
94 bool IsArgMinMaxSupported(const BackendId& backend,
95 const TensorInfo& input,
96 const TensorInfo& output,
97 const ArgMinMaxDescriptor& descriptor,
98 char* reasonIfUnsupported,
99 size_t reasonIfUnsupportedMaxLength)
101 FORWARD_LAYER_SUPPORT_FUNC(backend, IsArgMinMaxSupported, input, output, descriptor);
104 bool IsBatchNormalizationSupported(const BackendId& backend,
105 const TensorInfo& input,
106 const TensorInfo& output,
107 const TensorInfo& mean,
108 const TensorInfo& var,
109 const TensorInfo& beta,
110 const TensorInfo& gamma,
111 const BatchNormalizationDescriptor& descriptor,
112 char* reasonIfUnsupported,
113 size_t reasonIfUnsupportedMaxLength)
115 FORWARD_LAYER_SUPPORT_FUNC(backend,
116 IsBatchNormalizationSupported,
126 bool IsBatchToSpaceNdSupported(const BackendId& backend,
127 const TensorInfo& input,
128 const TensorInfo& output,
129 const BatchToSpaceNdDescriptor& descriptor,
130 char* reasonIfUnsupported,
131 size_t reasonIfUnsupportedMaxLength)
133 FORWARD_LAYER_SUPPORT_FUNC(backend,
134 IsBatchToSpaceNdSupported,
140 bool IsConcatSupported(const BackendId& backend,
141 std::vector<const TensorInfo*> inputs,
142 const TensorInfo& output,
143 const OriginsDescriptor& descriptor,
144 char* reasonIfUnsupported,
145 size_t reasonIfUnsupportedMaxLength)
147 BOOST_ASSERT(inputs.size() > 0);
149 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
152 bool IsConstantSupported(const BackendId& backend,
153 const TensorInfo& output,
154 char* reasonIfUnsupported,
155 size_t reasonIfUnsupportedMaxLength)
157 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
160 bool IsConvertFp16ToFp32Supported(const BackendId& backend,
161 const TensorInfo& input,
162 const TensorInfo& output,
163 char* reasonIfUnsupported,
164 size_t reasonIfUnsupportedMaxLength)
166 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
169 bool IsConvertFp32ToFp16Supported(const BackendId& backend,
170 const TensorInfo& input,
171 const TensorInfo& output,
172 char* reasonIfUnsupported,
173 size_t reasonIfUnsupportedMaxLength)
175 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
178 bool IsConvolution2dSupported(const BackendId& backend,
179 const TensorInfo& input,
180 const TensorInfo& output,
181 const Convolution2dDescriptor& descriptor,
182 const TensorInfo& weights,
183 const Optional<TensorInfo>& biases,
184 char* reasonIfUnsupported,
185 size_t reasonIfUnsupportedMaxLength)
187 FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
190 bool IsDebugSupported(const BackendId& backend,
191 const TensorInfo& input,
192 const TensorInfo& output,
193 char* reasonIfUnsupported,
194 size_t reasonIfUnsupportedMaxLength)
196 FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output);
199 bool IsDepthwiseConvolutionSupported(const BackendId& backend,
200 const TensorInfo& input,
201 const TensorInfo& output,
202 const DepthwiseConvolution2dDescriptor& descriptor,
203 const TensorInfo& weights,
204 const Optional<TensorInfo>& biases,
205 char* reasonIfUnsupported,
206 size_t reasonIfUnsupportedMaxLength)
208 if (descriptor.m_DilationX == 1 && descriptor.m_DilationY == 1)
210 // Pre 19.05 ArmNN did not have the dilation parameters.
211 // This version of IsDepthwiseConvolutionSupported is called for backwards-compatibility
212 FORWARD_LAYER_SUPPORT_FUNC(backend,
213 IsDepthwiseConvolutionSupported,
222 FORWARD_LAYER_SUPPORT_FUNC(backend,
223 IsDilatedDepthwiseConvolutionSupported,
232 bool IsDequantizeSupported(const BackendId& backend,
233 const TensorInfo& input,
234 const TensorInfo& output,
235 char* reasonIfUnsupported,
236 size_t reasonIfUnsupportedMaxLength)
238 FORWARD_LAYER_SUPPORT_FUNC(backend, IsDequantizeSupported, input, output);
// NOTE(review): declaration only (note the trailing semicolon) — no definition
// appears in this file as shown. Presumably defined elsewhere or left over;
// TODO confirm before removing.
bool IsDetectionPostProcessSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
const DetectionPostProcessDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength);
248 bool IsDivisionSupported(const BackendId& backend,
249 const TensorInfo& input0,
250 const TensorInfo& input1,
251 const TensorInfo& output,
252 char* reasonIfUnsupported,
253 size_t reasonIfUnsupportedMaxLength)
255 FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
258 bool IsEqualSupported(const BackendId& backend,
259 const TensorInfo& input0,
260 const TensorInfo& input1,
261 const TensorInfo& output,
262 char* reasonIfUnsupported,
263 size_t reasonIfUnsupportedMaxLength)
265 FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output);
268 bool IsFakeQuantizationSupported(const BackendId& backend,
269 const TensorInfo& input,
270 const FakeQuantizationDescriptor& descriptor,
271 char* reasonIfUnsupported,
272 size_t reasonIfUnsupportedMaxLength)
274 FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
277 bool IsFloorSupported(const BackendId& backend,
278 const TensorInfo& input,
279 const TensorInfo& output,
280 char* reasonIfUnsupported,
281 size_t reasonIfUnsupportedMaxLength)
283 // By definition (that is, regardless of compute device), shapes and data type must match.
284 if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
289 FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
291 bool IsFullyConnectedSupported(const BackendId& backend,
292 const TensorInfo& input,
293 const TensorInfo& output,
294 const TensorInfo& weights,
295 const TensorInfo& biases,
296 const FullyConnectedDescriptor& descriptor,
297 char* reasonIfUnsupported,
298 size_t reasonIfUnsupportedMaxLength)
300 FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
303 bool IsGatherSupported(const BackendId& backend,
304 const TensorInfo& input0,
305 const TensorInfo& input1,
306 const TensorInfo& output,
307 char* reasonIfUnsupported,
308 size_t reasonIfUnsupportedMaxLength)
310 FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output);
313 bool IsGreaterSupported(const BackendId& backend,
314 const TensorInfo& input0,
315 const TensorInfo& input1,
316 const TensorInfo& output,
317 char* reasonIfUnsupported,
318 size_t reasonIfUnsupportedMaxLength)
320 FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
323 bool IsInputSupported(const BackendId& backend,
324 const TensorInfo& input,
325 char* reasonIfUnsupported,
326 size_t reasonIfUnsupportedMaxLength)
328 FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
332 bool IsL2NormalizationSupported(const BackendId& backend,
333 const TensorInfo& input,
334 const TensorInfo& output,
335 const L2NormalizationDescriptor& descriptor,
336 char* reasonIfUnsupported,
337 size_t reasonIfUnsupportedMaxLength)
339 FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
342 bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
343 const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
344 const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
345 const TensorInfo& output, const LstmDescriptor& descriptor,
346 const LstmInputParamsInfo& paramsInfo, char* reasonIfUnsupported,
347 size_t reasonIfUnsupportedMaxLength)
350 FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
351 scratchBuffer, outputStateOut, cellStateOut,
352 output, descriptor, paramsInfo);
355 bool IsMaximumSupported(const BackendId& backend,
356 const TensorInfo& input0,
357 const TensorInfo& input1,
358 const TensorInfo& output,
359 char* reasonIfUnsupported,
360 size_t reasonIfUnsupportedMaxLength)
362 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMaximumSupported, input0, input1, output);
365 bool IsMeanSupported(const BackendId& backend,
366 const TensorInfo& input,
367 const TensorInfo& output,
368 const MeanDescriptor& descriptor,
369 char* reasonIfUnsupported,
370 size_t reasonIfUnsupportedMaxLength)
372 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
375 bool IsMemCopySupported(const BackendId &backend,
376 const TensorInfo &input,
377 const TensorInfo &output,
378 char *reasonIfUnsupported,
379 size_t reasonIfUnsupportedMaxLength)
381 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output);
384 bool IsMemImportSupported(const BackendId &backend,
385 const TensorInfo &input,
386 const TensorInfo &output,
387 char *reasonIfUnsupported,
388 size_t reasonIfUnsupportedMaxLength)
390 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemImportSupported, input, output);
393 bool IsMergeSupported(const BackendId& backend,
394 const TensorInfo& input0,
395 const TensorInfo& input1,
396 const TensorInfo& output,
397 char* reasonIfUnsupported,
398 size_t reasonIfUnsupportedMaxLength)
400 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
403 ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
404 bool IsMergerSupported(const BackendId& backend,
405 std::vector<const TensorInfo*> inputs,
406 const TensorInfo& output,
407 const OriginsDescriptor& descriptor,
408 char* reasonIfUnsupported,
409 size_t reasonIfUnsupportedMaxLength)
411 BOOST_ASSERT(inputs.size() > 0);
413 ARMNN_NO_DEPRECATE_WARN_BEGIN
414 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
415 ARMNN_NO_DEPRECATE_WARN_END
418 bool IsMinimumSupported(const BackendId& backend,
419 const TensorInfo& input0,
420 const TensorInfo& input1,
421 const TensorInfo& output,
422 char* reasonIfUnsupported,
423 size_t reasonIfUnsupportedMaxLength)
425 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
428 bool IsMultiplicationSupported(const BackendId& backend,
429 const TensorInfo& input0,
430 const TensorInfo& input1,
431 const TensorInfo& output,
432 char* reasonIfUnsupported,
433 size_t reasonIfUnsupportedMaxLength)
435 FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
438 bool IsNormalizationSupported(const BackendId& backend,
439 const TensorInfo& input,
440 const TensorInfo& output,
441 const NormalizationDescriptor& descriptor,
442 char* reasonIfUnsupported,
443 size_t reasonIfUnsupportedMaxLength)
445 FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
448 bool IsOutputSupported(const BackendId& backend,
449 const TensorInfo& output,
450 char* reasonIfUnsupported,
451 size_t reasonIfUnsupportedMaxLength)
453 FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
456 bool IsPadSupported(const BackendId& backend,
457 const TensorInfo& input,
458 const TensorInfo& output,
459 const PadDescriptor& descriptor,
460 char* reasonIfUnsupported,
461 size_t reasonIfUnsupportedMaxLength)
464 FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
467 bool IsQuantizeSupported(const BackendId& backend,
468 const TensorInfo& input,
469 const TensorInfo& output,
470 char* reasonIfUnsupported,
471 size_t reasonIfUnsupportedMaxLength)
473 FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizeSupported, input, output);
476 bool IsQuantizedLstmSupported(const BackendId& backend,
477 const TensorInfo& input,
478 const TensorInfo& previousCellStateIn,
479 const TensorInfo& previousOutputIn,
480 const TensorInfo& cellStateOut,
481 const TensorInfo& output,
482 const QuantizedLstmInputParamsInfo& paramsInfo,
483 char* reasonIfUnsupported,
484 size_t reasonIfUnsupportedMaxLength)
487 FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizedLstmSupported, input, previousCellStateIn, previousOutputIn,
488 cellStateOut, output, paramsInfo);
491 bool IsPermuteSupported(const BackendId& backend,
492 const TensorInfo& input,
493 const TensorInfo& output,
494 const PermuteDescriptor& descriptor,
495 char* reasonIfUnsupported,
496 size_t reasonIfUnsupportedMaxLength)
498 FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
501 bool IsPooling2dSupported(const BackendId& backend,
502 const TensorInfo& input,
503 const TensorInfo& output,
504 const Pooling2dDescriptor& descriptor,
505 char* reasonIfUnsupported,
506 size_t reasonIfUnsupportedMaxLength)
508 FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
511 bool IsPreluSupported(const BackendId& backend,
512 const TensorInfo& input,
513 const TensorInfo& alpha,
514 const TensorInfo& output,
515 char* reasonIfUnsupported,
516 size_t reasonIfUnsupportedMaxLength)
518 FORWARD_LAYER_SUPPORT_FUNC(backend, IsPreluSupported, input, alpha, output);
521 bool IsReshapeSupported(const BackendId& backend,
522 const TensorInfo& input,
523 const ReshapeDescriptor& descriptor,
524 char* reasonIfUnsupported,
525 size_t reasonIfUnsupportedMaxLength)
527 FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
530 bool IsResizeSupported(const BackendId& backend,
531 const TensorInfo& input,
532 const TensorInfo& output,
533 const ResizeDescriptor& descriptor,
534 char* reasonIfUnsupported,
535 size_t reasonIfUnsupportedMaxLength)
537 FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
540 ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
541 bool IsResizeBilinearSupported(const BackendId& backend,
542 const TensorInfo& input,
543 const TensorInfo& output,
544 char* reasonIfUnsupported,
545 size_t reasonIfUnsupportedMaxLength)
547 ResizeDescriptor descriptor;
548 descriptor.m_Method = ResizeMethod::Bilinear;
550 const TensorShape& outputShape = output.GetShape();
551 descriptor.m_TargetWidth = outputShape[3];
552 descriptor.m_TargetHeight = outputShape[2];
554 FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
557 bool IsRsqrtSupported(const BackendId& backend,
558 const TensorInfo& input,
559 const TensorInfo& output,
560 char* reasonIfUnsupported,
561 size_t reasonIfUnsupportedMaxLength)
563 FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
566 bool IsSoftmaxSupported(const BackendId& backend,
567 const TensorInfo& input,
568 const TensorInfo& output,
569 const SoftmaxDescriptor& descriptor,
570 char* reasonIfUnsupported,
571 size_t reasonIfUnsupportedMaxLength)
573 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
576 bool IsSpaceToBatchNdSupported(const BackendId& backend,
577 const TensorInfo& input,
578 const TensorInfo& output,
579 const SpaceToBatchNdDescriptor& descriptor,
580 char* reasonIfUnsupported,
581 size_t reasonIfUnsupportedMaxLength)
583 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
586 bool IsSpaceToDepthSupported(const BackendId& backend,
587 const TensorInfo& input,
588 const TensorInfo& output,
589 const SpaceToDepthDescriptor& descriptor,
590 char* reasonIfUnsupported,
591 size_t reasonIfUnsupportedMaxLength)
593 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToDepthSupported, input, output, descriptor);
596 ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
597 bool IsSplitterSupported(const BackendId& backend,
598 const TensorInfo& input,
599 const ViewsDescriptor& descriptor,
600 char* reasonIfUnsupported,
601 size_t reasonIfUnsupportedMaxLength)
603 ARMNN_NO_DEPRECATE_WARN_BEGIN
604 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
605 ARMNN_NO_DEPRECATE_WARN_END
608 bool IsSplitterSupported(const BackendId& backend,
609 const TensorInfo& input,
610 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
611 const ViewsDescriptor& descriptor,
612 char* reasonIfUnsupported,
613 size_t reasonIfUnsupportedMaxLength)
615 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, outputs, descriptor);
618 bool IsStridedSliceSupported(const BackendId& backend,
619 const TensorInfo& input,
620 const TensorInfo& output,
621 const StridedSliceDescriptor& descriptor,
622 char* reasonIfUnsupported,
623 size_t reasonIfUnsupportedMaxLength)
625 FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
628 bool IsSubtractionSupported(const BackendId& backend,
629 const TensorInfo& input0,
630 const TensorInfo& input1,
631 const TensorInfo& output,
632 char* reasonIfUnsupported,
633 size_t reasonIfUnsupportedMaxLength)
635 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
638 bool IsSwitchSupported(const BackendId& backend,
639 const TensorInfo& input0,
640 const TensorInfo& input1,
641 const TensorInfo& output0,
642 const TensorInfo& output1,
643 char* reasonIfUnsupported,
644 size_t reasonIfUnsupportedMaxLength)
646 FORWARD_LAYER_SUPPORT_FUNC(backend, IsSwitchSupported, input0, input1, output0, output1);