IVGCVSW-3722 Add front end support for ArgMinMax
[platform/upstream/armnn.git] / src / armnn / LayerSupport.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include <armnn/LayerSupport.hpp>
6 #include <armnn/Optional.hpp>
7 #include <armnn/ILayerSupport.hpp>
8
9 #include <backendsCommon/BackendRegistry.hpp>
10 #include <backendsCommon/IBackendInternal.hpp>
11
12 #include <boost/assert.hpp>
13
14 #include <cstring>
15 #include <algorithm>
16 #include <unordered_map>
17 #include <armnn/ArmNN.hpp>
18
namespace
{

/// Copies fullString into truncatedString, truncating it so it fits within
/// maxLength characters (including the terminating null written by snprintf).
/// A null destination pointer is a no-op.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if (truncatedString == nullptr)
    {
        return;
    }
    std::snprintf(truncatedString, maxLength, "%s", fullString);
}

} // anonymous namespace
32
33 namespace armnn
34 {
35
36 // Helper macro to avoid code duplication.
37 // Forwards function func to funcRef, funcNeon or funcCl, depending on the value of backendId.
// Helper macro to avoid code duplication.
// Forwards function func to the ILayerSupport object of the backend named by backendId.
// Fix: CopyErrorMessage is now invoked on both the registered and the unregistered
// paths, so callers always receive the failure reason in reasonIfUnsupported.
// (Previously the "backend is not registered" message was built but never copied out.)
// Expects reasonIfUnsupported and reasonIfUnsupportedMaxLength in the enclosing scope.
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported; \
    try { \
        auto const& backendRegistry = BackendRegistryInstance(); \
        if (!backendRegistry.IsBackendRegistered(backendId)) \
        { \
            std::stringstream ss; \
            ss << __func__ << " is not supported on " << backendId << " because this backend is not registered."; \
            reasonIfUnsupportedFull = ss.str(); \
            isSupported = false; \
        } \
        else \
        { \
            auto factoryFunc = backendRegistry.GetFactory(backendId); \
            auto backendObject = factoryFunc(); \
            auto layerSupportObject = backendObject->GetLayerSupport(); \
            isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
        } \
        CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
    } catch (const InvalidArgumentException &e) { \
        /* re-throwing with more context information */ \
        throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    } \
    return isSupported;
63
64 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
65 {
66     return input0.GetDataType() == input1.GetDataType();
67 }
68
/// Checks whether an Activation layer with the given tensors and descriptor is supported
/// on 'backend'; any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsActivationSupported(const BackendId& backend,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const ActivationDescriptor& descriptor,
                           char* reasonIfUnsupported,
                           size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
}
78
79 bool IsAdditionSupported(const BackendId& backend,
80                          const TensorInfo& input0,
81                          const TensorInfo& input1,
82                          const TensorInfo& output,
83                          char* reasonIfUnsupported,
84                          size_t reasonIfUnsupportedMaxLength)
85 {
86     if(!CheckTensorDataTypesEqual(input0, input1))
87     {
88         return false;
89     }
90
91     FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
92 }
93
/// Checks whether an ArgMinMax layer with the given tensors and descriptor is supported
/// on 'backend'; any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsArgMinMaxSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const TensorInfo& output,
                          const ArgMinMaxDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsArgMinMaxSupported, input, output, descriptor);
}
103
/// Checks whether a BatchNormalization layer (with mean/var/beta/gamma parameter tensors)
/// is supported on 'backend'; any failure reason is copied into reasonIfUnsupported.
bool IsBatchNormalizationSupported(const BackendId& backend,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& mean,
                                   const TensorInfo& var,
                                   const TensorInfo& beta,
                                   const TensorInfo& gamma,
                                   const BatchNormalizationDescriptor& descriptor,
                                   char* reasonIfUnsupported,
                                   size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend,
                               IsBatchNormalizationSupported,
                               input,
                               output,
                               mean,
                               var,
                               beta,
                               gamma,
                               descriptor);
}
125
/// Checks whether a BatchToSpaceNd layer with the given tensors and descriptor is supported
/// on 'backend'; any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsBatchToSpaceNdSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const BatchToSpaceNdDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend,
                               IsBatchToSpaceNdSupported,
                               input,
                               output,
                               descriptor);
}
139
/// Checks whether a Concat layer over 'inputs' is supported on 'backend'.
/// Asserts that at least one input is provided; any failure reason is copied
/// (truncated) into reasonIfUnsupported.
bool IsConcatSupported(const BackendId& backend,
                       std::vector<const TensorInfo*> inputs,
                       const TensorInfo& output,
                       const OriginsDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    BOOST_ASSERT(inputs.size() > 0);

    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
}
151
/// Checks whether a Constant layer producing 'output' is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsConstantSupported(const BackendId& backend,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
}
159
/// Checks whether an Fp16->Fp32 conversion layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsConvertFp16ToFp32Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
}
168
/// Checks whether an Fp32->Fp16 conversion layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsConvertFp32ToFp16Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
}
177
/// Checks whether a Convolution2d layer (weights plus optional biases) is supported
/// on 'backend'; any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsConvolution2dSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const Convolution2dDescriptor& descriptor,
                              const TensorInfo& weights,
                              const Optional<TensorInfo>& biases,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
}
189
/// Checks whether a Debug layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsDebugSupported(const BackendId& backend,
                      const TensorInfo& input,
                      const TensorInfo& output,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output);
}
198
/// Checks whether a DepthwiseConvolution2d layer is supported on 'backend'.
/// Dispatches on the dilation parameters: the unit-dilation case uses the pre-19.05
/// query for backwards compatibility, while dilated convolutions use the dedicated
/// IsDilatedDepthwiseConvolutionSupported query. Any failure reason is copied
/// (truncated) into reasonIfUnsupported.
bool IsDepthwiseConvolutionSupported(const BackendId& backend,
                                     const TensorInfo& input,
                                     const TensorInfo& output,
                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                     const TensorInfo& weights,
                                     const Optional<TensorInfo>& biases,
                                     char* reasonIfUnsupported,
                                     size_t reasonIfUnsupportedMaxLength)
{
    if (descriptor.m_DilationX == 1 && descriptor.m_DilationY == 1)
    {
        // Pre 19.05 ArmNN did not have the dilation parameters.
        // This version of IsDepthwiseConvolutionSupported is called for backwards-compatibility
        FORWARD_LAYER_SUPPORT_FUNC(backend,
                                   IsDepthwiseConvolutionSupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
    }
    else
    {
        FORWARD_LAYER_SUPPORT_FUNC(backend,
                                   IsDilatedDepthwiseConvolutionSupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
    }
}
231
/// Checks whether a Dequantize layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsDequantizeSupported(const BackendId& backend,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           char* reasonIfUnsupported,
                           size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDequantizeSupported, input, output);
}
240
/// Checks whether a DetectionPostProcess layer is supported on 'backend'.
/// NOTE(review): declaration only — no definition appears in this translation unit;
/// confirm it is defined elsewhere or is intentionally left unimplemented.
bool IsDetectionPostProcessSupported(const BackendId& backend,
                                     const TensorInfo& input0,
                                     const TensorInfo& input1,
                                     const DetectionPostProcessDescriptor& descriptor,
                                     char* reasonIfUnsupported,
                                     size_t reasonIfUnsupportedMaxLength);
247
/// Checks whether a Division layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsDivisionSupported(const BackendId& backend,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
}
257
/// Checks whether an Equal (element-wise comparison) layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsEqualSupported(const BackendId& backend,
                      const TensorInfo& input0,
                      const TensorInfo& input1,
                      const TensorInfo& output,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsEqualSupported, input0, input1, output);
}
267
/// Checks whether a FakeQuantization layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsFakeQuantizationSupported(const BackendId& backend,
                                 const TensorInfo& input,
                                 const FakeQuantizationDescriptor& descriptor,
                                 char* reasonIfUnsupported,
                                 size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
}
276
277 bool IsFloorSupported(const BackendId& backend,
278                       const TensorInfo& input,
279                       const TensorInfo& output,
280                       char* reasonIfUnsupported,
281                       size_t reasonIfUnsupportedMaxLength)
282 {
283     // By definition (that is, regardless of compute device), shapes and data type must match.
284     if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
285     {
286         return false;
287     }
288
289     FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
290 }
/// Checks whether a FullyConnected layer (with weights and biases tensors) is supported
/// on 'backend'; any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsFullyConnectedSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const TensorInfo& weights,
                               const TensorInfo& biases,
                               const FullyConnectedDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
302
/// Checks whether a Gather layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsGatherSupported(const BackendId& backend,
                       const TensorInfo& input0,
                       const TensorInfo& input1,
                       const TensorInfo& output,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output);
}
312
/// Checks whether a Greater (element-wise comparison) layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsGreaterSupported(const BackendId& backend,
                        const TensorInfo& input0,
                        const TensorInfo& input1,
                        const TensorInfo& output,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
}
322
/// Checks whether an Input layer producing 'input' is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsInputSupported(const BackendId& backend,
                      const TensorInfo& input,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
}
330
331
/// Checks whether an L2Normalization layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsL2NormalizationSupported(const BackendId& backend,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                const L2NormalizationDescriptor& descriptor,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
}
341
/// Checks whether an LSTM layer (with its state/scratch tensors, descriptor and weight
/// parameter infos) is supported on 'backend'; any failure reason is copied (truncated)
/// into reasonIfUnsupported.
bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
                     const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                     const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                     const TensorInfo& output, const LstmDescriptor& descriptor,
                     const LstmInputParamsInfo& paramsInfo, char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)

{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
                               scratchBuffer, outputStateOut, cellStateOut,
                               output, descriptor, paramsInfo);
}
354
/// Checks whether a Maximum (element-wise) layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsMaximumSupported(const BackendId& backend,
                        const TensorInfo& input0,
                        const TensorInfo& input1,
                        const TensorInfo& output,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMaximumSupported, input0, input1, output);
}
364
/// Checks whether a Mean (reduction) layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsMeanSupported(const BackendId& backend,
                     const TensorInfo& input,
                     const TensorInfo& output,
                     const MeanDescriptor& descriptor,
                     char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
}
374
/// Checks whether a MemCopy layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsMemCopySupported(const BackendId &backend,
                        const TensorInfo &input,
                        const TensorInfo &output,
                        char *reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output);
}
383
/// Checks whether a MemImport layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsMemImportSupported(const BackendId &backend,
                          const TensorInfo &input,
                          const TensorInfo &output,
                          char *reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemImportSupported, input, output);
}
392
/// Checks whether a Merge layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsMergeSupported(const BackendId& backend,
                      const TensorInfo& input0,
                      const TensorInfo& input1,
                      const TensorInfo& output,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
}
402
/// Deprecated alias for IsConcatSupported: checks whether a Merger layer over 'inputs'
/// is supported on 'backend'. Asserts at least one input; any failure reason is copied
/// (truncated) into reasonIfUnsupported.
ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
bool IsMergerSupported(const BackendId& backend,
                       std::vector<const TensorInfo*> inputs,
                       const TensorInfo& output,
                       const OriginsDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    BOOST_ASSERT(inputs.size() > 0);

    // Suppress deprecation warnings while forwarding to the deprecated backend query.
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
    ARMNN_NO_DEPRECATE_WARN_END
}
417
/// Checks whether a Minimum (element-wise) layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsMinimumSupported(const BackendId& backend,
                        const TensorInfo& input0,
                        const TensorInfo& input1,
                        const TensorInfo& output,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
}
427
/// Checks whether a Multiplication layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsMultiplicationSupported(const BackendId& backend,
                               const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
}
437
/// Checks whether a Normalization layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsNormalizationSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const NormalizationDescriptor& descriptor,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
}
447
/// Checks whether an Output layer consuming 'output' is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsOutputSupported(const BackendId& backend,
                       const TensorInfo& output,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
}
455
/// Checks whether a Pad layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsPadSupported(const BackendId& backend,
                    const TensorInfo& input,
                    const TensorInfo& output,
                    const PadDescriptor& descriptor,
                    char* reasonIfUnsupported,
                    size_t reasonIfUnsupportedMaxLength)
{

    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
}
466
/// Checks whether a Quantize layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsQuantizeSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizeSupported, input, output);
}
475
/// Checks whether a QuantizedLstm layer (with its state tensors and weight parameter
/// infos) is supported on 'backend'; any failure reason is copied (truncated) into
/// reasonIfUnsupported.
bool IsQuantizedLstmSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& previousCellStateIn,
                              const TensorInfo& previousOutputIn,
                              const TensorInfo& cellStateOut,
                              const TensorInfo& output,
                              const QuantizedLstmInputParamsInfo& paramsInfo,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)

{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizedLstmSupported, input, previousCellStateIn, previousOutputIn,
                               cellStateOut, output, paramsInfo);
}
490
/// Checks whether a Permute layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsPermuteSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const PermuteDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
}
500
/// Checks whether a Pooling2d layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsPooling2dSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const TensorInfo& output,
                          const Pooling2dDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
}
510
/// Checks whether a Prelu layer (with its 'alpha' slope tensor) is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsPreluSupported(const BackendId& backend,
                      const TensorInfo& input,
                      const TensorInfo& alpha,
                      const TensorInfo& output,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPreluSupported, input, alpha, output);
}
520
/// Checks whether a Reshape layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsReshapeSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const ReshapeDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
}
529
/// Checks whether a Resize layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsResizeSupported(const BackendId& backend,
                       const TensorInfo& input,
                       const TensorInfo& output,
                       const ResizeDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
}
539
/// Deprecated adapter: builds a bilinear ResizeDescriptor from the output shape and
/// defers to the IsResizeSupported query.
ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
bool IsResizeBilinearSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    ResizeDescriptor descriptor;
    descriptor.m_Method = ResizeMethod::Bilinear;

    // Target size is read from output shape dimensions [3] (width) and [2] (height).
    // NOTE(review): this assumes a 4-D NCHW output tensor and performs no rank check —
    // confirm callers never pass NHWC or lower-rank tensors here.
    const TensorShape& outputShape = output.GetShape();
    descriptor.m_TargetWidth  = outputShape[3];
    descriptor.m_TargetHeight = outputShape[2];

    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
}
556
/// Checks whether an Rsqrt layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsRsqrtSupported(const BackendId& backend,
                      const TensorInfo& input,
                      const TensorInfo& output,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsRsqrtSupported, input, output);
}
565
/// Checks whether a Softmax layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsSoftmaxSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const SoftmaxDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
}
575
/// Checks whether a SpaceToBatchNd layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsSpaceToBatchNdSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const SpaceToBatchNdDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
}
585
/// Checks whether a SpaceToDepth layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsSpaceToDepthSupported(const BackendId& backend,
                             const TensorInfo& input,
                             const TensorInfo& output,
                             const SpaceToDepthDescriptor& descriptor,
                             char* reasonIfUnsupported,
                             size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToDepthSupported, input, output, descriptor);
}
595
/// Deprecated overload without output tensor infos: checks whether a Splitter layer is
/// supported on 'backend'; any failure reason is copied into reasonIfUnsupported.
ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
bool IsSplitterSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const ViewsDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    // Suppress deprecation warnings while forwarding to the deprecated backend query.
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
    ARMNN_NO_DEPRECATE_WARN_END
}
607
/// Checks whether a Splitter layer producing the given output views is supported on
/// 'backend'; any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsSplitterSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                         const ViewsDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, outputs, descriptor);
}
617
/// Checks whether a StridedSlice layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsStridedSliceSupported(const BackendId& backend,
                             const TensorInfo& input,
                             const TensorInfo& output,
                             const StridedSliceDescriptor& descriptor,
                             char* reasonIfUnsupported,
                             size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
}
627
/// Checks whether a Subtraction layer is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsSubtractionSupported(const BackendId& backend,
                            const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            char* reasonIfUnsupported,
                            size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
}
637
/// Checks whether a Switch layer (two inputs, two outputs) is supported on 'backend';
/// any failure reason is copied (truncated) into reasonIfUnsupported.
bool IsSwitchSupported(const BackendId& backend,
                       const TensorInfo& input0,
                       const TensorInfo& input1,
                       const TensorInfo& output0,
                       const TensorInfo& output1,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSwitchSupported, input0, input1, output0, output1);
}
648
649 } // namespace armnn