d730205981a457fb3076573fa205e69dd4f022b2
[platform/upstream/armnn.git] / src / armnn / LayerSupport.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include <armnn/LayerSupport.hpp>
6 #include <armnn/Optional.hpp>
7 #include <armnn/ILayerSupport.hpp>
8
9 #include <backendsCommon/BackendRegistry.hpp>
10 #include <backendsCommon/IBackendInternal.hpp>
11
12 #include <boost/assert.hpp>
13
14 #include <cstring>
15 #include <algorithm>
16 #include <unordered_map>
17 #include <armnn/ArmNN.hpp>
18
19 namespace
20 {
21
/// Helper function to copy a full string to a truncated version.
/// Copies as much of 'fullString' as fits (at most maxLength-1 characters plus
/// a terminating NUL, via snprintf's truncating "%s" format).
/// Hardened against null destination/source and zero-length buffers, all of
/// which are treated as a no-op (passing a null source to "%s" is UB).
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if (truncatedString != nullptr && fullString != nullptr && maxLength > 0)
    {
        std::snprintf(truncatedString, maxLength, "%s", fullString);
    }
}
30
31 } // anonymous namespace
32
33 namespace armnn
34 {
35
// Helper macro to avoid code duplication in the IsXxxSupported free functions
// below. It expands, inside the calling function, to code that:
//   * looks up 'backendId' in the backend registry; if it is not registered,
//     the query fails with an explanatory message;
//   * otherwise creates the backend, calls 'func' on its ILayerSupport object
//     with the given arguments, collecting any failure reason;
//   * copies the (possibly truncated) reason into the caller's buffer;
//   * re-throws InvalidArgumentException with additional context.
// NOTE: the macro relies on the calling function declaring parameters named
// 'reasonIfUnsupported' and 'reasonIfUnsupportedMaxLength', and it ends with
// a 'return', so it must be the last statement of the caller.
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported; \
    try { \
        auto const& backendRegistry = BackendRegistryInstance(); \
        if (!backendRegistry.IsBackendRegistered(backendId)) \
        { \
            std::stringstream ss; \
            ss << __func__ << " is not supported on " << backendId << " because this backend is not registered."; \
            reasonIfUnsupportedFull = ss.str(); \
            isSupported = false; \
        } \
        else \
        { \
            auto factoryFunc = backendRegistry.GetFactory(backendId); \
            auto backendObject = factoryFunc(); \
            auto layerSupportObject = backendObject->GetLayerSupport(); \
            isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
        } \
        /* Copy the reason in both branches: previously the "backend is not */ \
        /* registered" message was built but never reached the caller.      */ \
        CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
    } catch (const InvalidArgumentException &e) { \
        /* re-throwing with more context information */ \
        throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    } \
    return isSupported;
63
64 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
65 {
66     return input0.GetDataType() == input1.GetDataType();
67 }
68
69 bool IsActivationSupported(const BackendId& backend,
70                            const TensorInfo& input,
71                            const TensorInfo& output,
72                            const ActivationDescriptor& descriptor,
73                            char* reasonIfUnsupported,
74                            size_t reasonIfUnsupportedMaxLength)
75 {
76     FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
77 }
78
79 bool IsAdditionSupported(const BackendId& backend,
80                          const TensorInfo& input0,
81                          const TensorInfo& input1,
82                          const TensorInfo& output,
83                          char* reasonIfUnsupported,
84                          size_t reasonIfUnsupportedMaxLength)
85 {
86     if(!CheckTensorDataTypesEqual(input0, input1))
87     {
88         return false;
89     }
90
91     FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
92 }
93
94 bool IsBatchNormalizationSupported(const BackendId& backend,
95                                    const TensorInfo& input,
96                                    const TensorInfo& output,
97                                    const TensorInfo& mean,
98                                    const TensorInfo& var,
99                                    const TensorInfo& beta,
100                                    const TensorInfo& gamma,
101                                    const BatchNormalizationDescriptor& descriptor,
102                                    char* reasonIfUnsupported,
103                                    size_t reasonIfUnsupportedMaxLength)
104 {
105     FORWARD_LAYER_SUPPORT_FUNC(backend,
106                                IsBatchNormalizationSupported,
107                                input,
108                                output,
109                                mean,
110                                var,
111                                beta,
112                                gamma,
113                                descriptor);
114 }
115
116 bool IsBatchToSpaceNdSupported(const BackendId& backend,
117                                const TensorInfo& input,
118                                const TensorInfo& output,
119                                const BatchToSpaceNdDescriptor& descriptor,
120                                char* reasonIfUnsupported,
121                                size_t reasonIfUnsupportedMaxLength)
122 {
123     FORWARD_LAYER_SUPPORT_FUNC(backend,
124                                IsBatchToSpaceNdSupported,
125                                input,
126                                output,
127                                descriptor);
128 }
129
130 bool IsConcatSupported(const BackendId& backend,
131                        std::vector<const TensorInfo*> inputs,
132                        const TensorInfo& output,
133                        const OriginsDescriptor& descriptor,
134                        char* reasonIfUnsupported,
135                        size_t reasonIfUnsupportedMaxLength)
136 {
137     BOOST_ASSERT(inputs.size() > 0);
138
139     FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
140 }
141
142 bool IsConstantSupported(const BackendId& backend,
143                          const TensorInfo& output,
144                          char* reasonIfUnsupported,
145                          size_t reasonIfUnsupportedMaxLength)
146 {
147     FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
148 }
149
150 bool IsConvertFp16ToFp32Supported(const BackendId& backend,
151                                   const TensorInfo& input,
152                                   const TensorInfo& output,
153                                   char* reasonIfUnsupported,
154                                   size_t reasonIfUnsupportedMaxLength)
155 {
156     FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
157 }
158
159 bool IsConvertFp32ToFp16Supported(const BackendId& backend,
160                                   const TensorInfo& input,
161                                   const TensorInfo& output,
162                                   char* reasonIfUnsupported,
163                                   size_t reasonIfUnsupportedMaxLength)
164 {
165     FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
166 }
167
168 bool IsConvolution2dSupported(const BackendId& backend,
169                               const TensorInfo& input,
170                               const TensorInfo& output,
171                               const Convolution2dDescriptor& descriptor,
172                               const TensorInfo& weights,
173                               const Optional<TensorInfo>& biases,
174                               char* reasonIfUnsupported,
175                               size_t reasonIfUnsupportedMaxLength)
176 {
177     FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
178 }
179
180 bool IsDebugSupported(const BackendId& backend,
181                       const TensorInfo& input,
182                       const TensorInfo& output,
183                       char* reasonIfUnsupported,
184                       size_t reasonIfUnsupportedMaxLength)
185 {
186     FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output);
187 }
188
189 bool IsDepthwiseConvolutionSupported(const BackendId& backend,
190                                      const TensorInfo& input,
191                                      const TensorInfo& output,
192                                      const DepthwiseConvolution2dDescriptor& descriptor,
193                                      const TensorInfo& weights,
194                                      const Optional<TensorInfo>& biases,
195                                      char* reasonIfUnsupported,
196                                      size_t reasonIfUnsupportedMaxLength)
197 {
198     if (descriptor.m_DilationX == 1 && descriptor.m_DilationY == 1)
199     {
200         // Pre 19.05 ArmNN did not have the dilation parameters.
201         // This version of IsDepthwiseConvolutionSupported is called for backwards-compatibility
202         FORWARD_LAYER_SUPPORT_FUNC(backend,
203                                    IsDepthwiseConvolutionSupported,
204                                    input,
205                                    output,
206                                    descriptor,
207                                    weights,
208                                    biases);
209     }
210     else
211     {
212         FORWARD_LAYER_SUPPORT_FUNC(backend,
213                                    IsDilatedDepthwiseConvolutionSupported,
214                                    input,
215                                    output,
216                                    descriptor,
217                                    weights,
218                                    biases);
219     }
220 }
221
222 bool IsDequantizeSupported(const BackendId& backend,
223                            const TensorInfo& input,
224                            const TensorInfo& output,
225                            char* reasonIfUnsupported,
226                            size_t reasonIfUnsupportedMaxLength)
227 {
228     FORWARD_LAYER_SUPPORT_FUNC(backend, IsDequantizeSupported, input, output);
229 }
230
231 bool IsDetectionPostProcessSupported(const BackendId& backend,
232                                      const TensorInfo& input0,
233                                      const TensorInfo& input1,
234                                      const DetectionPostProcessDescriptor& descriptor,
235                                      char* reasonIfUnsupported,
236                                      size_t reasonIfUnsupportedMaxLength);
237
238 bool IsDivisionSupported(const BackendId& backend,
239                          const TensorInfo& input0,
240                          const TensorInfo& input1,
241                          const TensorInfo& output,
242                          char* reasonIfUnsupported,
243                          size_t reasonIfUnsupportedMaxLength)
244 {
245     FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
246 }
247
248 bool IsEqualSupported(const BackendId& backend,
249                       const TensorInfo& input0,
250                       const TensorInfo& input1,
251                       const TensorInfo& output,
252                       char* reasonIfUnsupported,
253                       size_t reasonIfUnsupportedMaxLength)
254 {
255     FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output);
256 }
257
258 bool IsFakeQuantizationSupported(const BackendId& backend,
259                                  const TensorInfo& input,
260                                  const FakeQuantizationDescriptor& descriptor,
261                                  char* reasonIfUnsupported,
262                                  size_t reasonIfUnsupportedMaxLength)
263 {
264     FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
265 }
266
267 bool IsFloorSupported(const BackendId& backend,
268                       const TensorInfo& input,
269                       const TensorInfo& output,
270                       char* reasonIfUnsupported,
271                       size_t reasonIfUnsupportedMaxLength)
272 {
273     // By definition (that is, regardless of compute device), shapes and data type must match.
274     if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
275     {
276         return false;
277     }
278
279     FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
280 }
281 bool IsFullyConnectedSupported(const BackendId& backend,
282                                const TensorInfo& input,
283                                const TensorInfo& output,
284                                const TensorInfo& weights,
285                                const TensorInfo& biases,
286                                const FullyConnectedDescriptor& descriptor,
287                                char* reasonIfUnsupported,
288                                size_t reasonIfUnsupportedMaxLength)
289 {
290     FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
291 }
292
293 bool IsGatherSupported(const BackendId& backend,
294                        const TensorInfo& input0,
295                        const TensorInfo& input1,
296                        const TensorInfo& output,
297                        char* reasonIfUnsupported,
298                        size_t reasonIfUnsupportedMaxLength)
299 {
300     FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output);
301 }
302
303 bool IsGreaterSupported(const BackendId& backend,
304                         const TensorInfo& input0,
305                         const TensorInfo& input1,
306                         const TensorInfo& output,
307                         char* reasonIfUnsupported,
308                         size_t reasonIfUnsupportedMaxLength)
309 {
310     FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
311 }
312
313 bool IsInputSupported(const BackendId& backend,
314                       const TensorInfo& input,
315                       char* reasonIfUnsupported,
316                       size_t reasonIfUnsupportedMaxLength)
317 {
318     FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
319 }
320
321
322 bool IsL2NormalizationSupported(const BackendId& backend,
323                                 const TensorInfo& input,
324                                 const TensorInfo& output,
325                                 const L2NormalizationDescriptor& descriptor,
326                                 char* reasonIfUnsupported,
327                                 size_t reasonIfUnsupportedMaxLength)
328 {
329     FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
330 }
331
332 bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
333                      const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
334                      const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
335                      const TensorInfo& output, const LstmDescriptor& descriptor,
336                      const LstmInputParamsInfo& paramsInfo, char* reasonIfUnsupported,
337                      size_t reasonIfUnsupportedMaxLength)
338
339 {
340     FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
341                                scratchBuffer, outputStateOut, cellStateOut,
342                                output, descriptor, paramsInfo);
343 }
344
345 bool IsMaximumSupported(const BackendId& backend,
346                         const TensorInfo& input0,
347                         const TensorInfo& input1,
348                         const TensorInfo& output,
349                         char* reasonIfUnsupported,
350                         size_t reasonIfUnsupportedMaxLength)
351 {
352     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMaximumSupported, input0, input1, output);
353 }
354
355 bool IsMeanSupported(const BackendId& backend,
356                      const TensorInfo& input,
357                      const TensorInfo& output,
358                      const MeanDescriptor& descriptor,
359                      char* reasonIfUnsupported,
360                      size_t reasonIfUnsupportedMaxLength)
361 {
362     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
363 }
364
365 bool IsMemCopySupported(const BackendId &backend,
366                         const TensorInfo &input,
367                         const TensorInfo &output,
368                         char *reasonIfUnsupported,
369                         size_t reasonIfUnsupportedMaxLength)
370 {
371     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output);
372 }
373
374 bool IsMemImportSupported(const BackendId &backend,
375                           const TensorInfo &input,
376                           const TensorInfo &output,
377                           char *reasonIfUnsupported,
378                           size_t reasonIfUnsupportedMaxLength)
379 {
380     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemImportSupported, input, output);
381 }
382
383 bool IsMergeSupported(const BackendId& backend,
384                       const TensorInfo& input0,
385                       const TensorInfo& input1,
386                       const TensorInfo& output,
387                       char* reasonIfUnsupported,
388                       size_t reasonIfUnsupportedMaxLength)
389 {
390     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
391 }
392
393 ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
394 bool IsMergerSupported(const BackendId& backend,
395                        std::vector<const TensorInfo*> inputs,
396                        const TensorInfo& output,
397                        const OriginsDescriptor& descriptor,
398                        char* reasonIfUnsupported,
399                        size_t reasonIfUnsupportedMaxLength)
400 {
401     BOOST_ASSERT(inputs.size() > 0);
402
403     ARMNN_NO_DEPRECATE_WARN_BEGIN
404     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
405     ARMNN_NO_DEPRECATE_WARN_END
406 }
407
408 bool IsMinimumSupported(const BackendId& backend,
409                         const TensorInfo& input0,
410                         const TensorInfo& input1,
411                         const TensorInfo& output,
412                         char* reasonIfUnsupported,
413                         size_t reasonIfUnsupportedMaxLength)
414 {
415     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
416 }
417
418 bool IsMultiplicationSupported(const BackendId& backend,
419                                const TensorInfo& input0,
420                                const TensorInfo& input1,
421                                const TensorInfo& output,
422                                char* reasonIfUnsupported,
423                                size_t reasonIfUnsupportedMaxLength)
424 {
425     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
426 }
427
428 bool IsNormalizationSupported(const BackendId& backend,
429                               const TensorInfo& input,
430                               const TensorInfo& output,
431                               const NormalizationDescriptor& descriptor,
432                               char* reasonIfUnsupported,
433                               size_t reasonIfUnsupportedMaxLength)
434 {
435     FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
436 }
437
438 bool IsOutputSupported(const BackendId& backend,
439                        const TensorInfo& output,
440                        char* reasonIfUnsupported,
441                        size_t reasonIfUnsupportedMaxLength)
442 {
443     FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
444 }
445
446 bool IsPadSupported(const BackendId& backend,
447                     const TensorInfo& input,
448                     const TensorInfo& output,
449                     const PadDescriptor& descriptor,
450                     char* reasonIfUnsupported,
451                     size_t reasonIfUnsupportedMaxLength)
452 {
453
454     FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
455 }
456
457 bool IsQuantizeSupported(const BackendId& backend,
458                          const TensorInfo& input,
459                          const TensorInfo& output,
460                          char* reasonIfUnsupported,
461                          size_t reasonIfUnsupportedMaxLength)
462 {
463     FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizeSupported, input, output);
464 }
465
466 bool IsQuantizedLstmSupported(const BackendId& backend,
467                               const TensorInfo& input,
468                               const TensorInfo& previousCellStateIn,
469                               const TensorInfo& previousOutputIn,
470                               const TensorInfo& cellStateOut,
471                               const TensorInfo& output,
472                               const QuantizedLstmInputParamsInfo& paramsInfo,
473                               char* reasonIfUnsupported,
474                               size_t reasonIfUnsupportedMaxLength)
475
476 {
477     FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizedLstmSupported, input, previousCellStateIn, previousOutputIn,
478                                cellStateOut, output, paramsInfo);
479 }
480
481 bool IsPermuteSupported(const BackendId& backend,
482                         const TensorInfo& input,
483                         const TensorInfo& output,
484                         const PermuteDescriptor& descriptor,
485                         char* reasonIfUnsupported,
486                         size_t reasonIfUnsupportedMaxLength)
487 {
488     FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
489 }
490
491 bool IsPooling2dSupported(const BackendId& backend,
492                           const TensorInfo& input,
493                           const TensorInfo& output,
494                           const Pooling2dDescriptor& descriptor,
495                           char* reasonIfUnsupported,
496                           size_t reasonIfUnsupportedMaxLength)
497 {
498     FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
499 }
500
501 bool IsPreluSupported(const BackendId& backend,
502                       const TensorInfo& input,
503                       const TensorInfo& alpha,
504                       const TensorInfo& output,
505                       char* reasonIfUnsupported,
506                       size_t reasonIfUnsupportedMaxLength)
507 {
508     FORWARD_LAYER_SUPPORT_FUNC(backend, IsPreluSupported, input, alpha, output);
509 }
510
511 bool IsReshapeSupported(const BackendId& backend,
512                         const TensorInfo& input,
513                         const ReshapeDescriptor& descriptor,
514                         char* reasonIfUnsupported,
515                         size_t reasonIfUnsupportedMaxLength)
516 {
517     FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
518 }
519
520 bool IsResizeSupported(const BackendId& backend,
521                        const TensorInfo& input,
522                        const TensorInfo& output,
523                        const ResizeDescriptor& descriptor,
524                        char* reasonIfUnsupported,
525                        size_t reasonIfUnsupportedMaxLength)
526 {
527     FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
528 }
529
530 ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
531 bool IsResizeBilinearSupported(const BackendId& backend,
532                                const TensorInfo& input,
533                                const TensorInfo& output,
534                                char* reasonIfUnsupported,
535                                size_t reasonIfUnsupportedMaxLength)
536 {
537     ResizeDescriptor descriptor;
538     descriptor.m_Method = ResizeMethod::Bilinear;
539
540     const TensorShape& outputShape = output.GetShape();
541     descriptor.m_TargetWidth  = outputShape[3];
542     descriptor.m_TargetHeight = outputShape[2];
543
544     FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
545 }
546
547 bool IsRsqrtSupported(const BackendId& backend,
548                       const TensorInfo& input,
549                       const TensorInfo& output,
550                       char* reasonIfUnsupported,
551                       size_t reasonIfUnsupportedMaxLength)
552 {
553     FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
554 }
555
556 bool IsSoftmaxSupported(const BackendId& backend,
557                         const TensorInfo& input,
558                         const TensorInfo& output,
559                         const SoftmaxDescriptor& descriptor,
560                         char* reasonIfUnsupported,
561                         size_t reasonIfUnsupportedMaxLength)
562 {
563     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
564 }
565
566 bool IsSpaceToBatchNdSupported(const BackendId& backend,
567                                const TensorInfo& input,
568                                const TensorInfo& output,
569                                const SpaceToBatchNdDescriptor& descriptor,
570                                char* reasonIfUnsupported,
571                                size_t reasonIfUnsupportedMaxLength)
572 {
573     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
574 }
575
576 bool IsSpaceToDepthSupported(const BackendId& backend,
577                              const TensorInfo& input,
578                              const TensorInfo& output,
579                              const SpaceToDepthDescriptor& descriptor,
580                              char* reasonIfUnsupported,
581                              size_t reasonIfUnsupportedMaxLength)
582 {
583     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToDepthSupported, input, output, descriptor);
584 }
585
586 ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
587 bool IsSplitterSupported(const BackendId& backend,
588                          const TensorInfo& input,
589                          const ViewsDescriptor& descriptor,
590                          char* reasonIfUnsupported,
591                          size_t reasonIfUnsupportedMaxLength)
592 {
593     ARMNN_NO_DEPRECATE_WARN_BEGIN
594     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
595     ARMNN_NO_DEPRECATE_WARN_END
596 }
597
598 bool IsSplitterSupported(const BackendId& backend,
599                          const TensorInfo& input,
600                          const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
601                          const ViewsDescriptor& descriptor,
602                          char* reasonIfUnsupported,
603                          size_t reasonIfUnsupportedMaxLength)
604 {
605     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, outputs, descriptor);
606 }
607
608 bool IsStridedSliceSupported(const BackendId& backend,
609                              const TensorInfo& input,
610                              const TensorInfo& output,
611                              const StridedSliceDescriptor& descriptor,
612                              char* reasonIfUnsupported,
613                              size_t reasonIfUnsupportedMaxLength)
614 {
615     FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
616 }
617
618 bool IsSubtractionSupported(const BackendId& backend,
619                             const TensorInfo& input0,
620                             const TensorInfo& input1,
621                             const TensorInfo& output,
622                             char* reasonIfUnsupported,
623                             size_t reasonIfUnsupportedMaxLength)
624 {
625     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
626 }
627
628 bool IsSwitchSupported(const BackendId& backend,
629                        const TensorInfo& input0,
630                        const TensorInfo& input1,
631                        const TensorInfo& output0,
632                        const TensorInfo& output1,
633                        char* reasonIfUnsupported,
634                        size_t reasonIfUnsupportedMaxLength)
635 {
636     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSwitchSupported, input0, input1, output0, output1);
637 }
638
639 } // namespace armnn