IVGCVSW-3722 Add front end support for ArgMinMax
[platform/upstream/armnn.git] / src / backends / backendsCommon / LayerSupportBase.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
#include "LayerSupportBase.hpp"

#include <armnn/Exceptions.hpp>

#include <boost/core/ignore_unused.hpp>

#include <sstream>
11
12 namespace
13 {
14
15 bool DefaultLayerSupport(const char* func,
16                          const char* file,
17                          unsigned int line,
18                          armnn::Optional<std::string&> reasonIfUnsupported)
19 {
20     // NOTE: We only need to return the reason if the optional parameter is not empty
21     if (reasonIfUnsupported)
22     {
23         std::stringstream message;
24         message << func << " is not implemented [" << file << ":" << line << "]";
25
26         reasonIfUnsupported.value() = message.str();
27     }
28
29     return false;
30 }
31
32 } // anonymous namespace
33
34 namespace armnn
35 {
36
37 bool LayerSupportBase::IsAbsSupported(const TensorInfo &input,
38                                       const TensorInfo &output,
39                                       Optional<std::string &> reasonIfUnsupported) const
40 {
41     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
42 }
43
44 bool LayerSupportBase::IsActivationSupported(const TensorInfo& input,
45                                              const TensorInfo& output,
46                                              const ActivationDescriptor& descriptor,
47                                              Optional<std::string&> reasonIfUnsupported) const
48 {
49     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
50 }
51
52 bool LayerSupportBase::IsAdditionSupported(const TensorInfo& input0,
53                                            const TensorInfo& input1,
54                                            const TensorInfo& output,
55                                            Optional<std::string&> reasonIfUnsupported) const
56 {
57     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
58 }
59
60 bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
61                                             const armnn::ArgMinMaxDescriptor& descriptor,
62                                             armnn::Optional<std::string &> reasonIfUnsupported) const
63 {
64     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
65 }
66
67 bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& input,
68                                                      const TensorInfo& output,
69                                                      const TensorInfo& mean,
70                                                      const TensorInfo& var,
71                                                      const TensorInfo& beta,
72                                                      const TensorInfo& gamma,
73                                                      const BatchNormalizationDescriptor& descriptor,
74                                                      Optional<std::string&> reasonIfUnsupported) const
75 {
76     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
77 }
78
79 bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& input,
80                                                  const TensorInfo& output,
81                                                  const BatchToSpaceNdDescriptor& descriptor,
82                                                  Optional<std::string&> reasonIfUnsupported) const
83 {
84     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
85 }
86
87 bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
88                                          const TensorInfo& output,
89                                          const OriginsDescriptor& descriptor,
90                                          Optional<std::string&> reasonIfUnsupported) const
91 {
92     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
93 }
94
95 bool LayerSupportBase::IsConstantSupported(const TensorInfo& output,
96                                            Optional<std::string&> reasonIfUnsupported) const
97 {
98     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
99 }
100
101 bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& input,
102                                                     const TensorInfo& output,
103                                                     Optional<std::string&> reasonIfUnsupported) const
104 {
105     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
106 }
107
108 bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo& input,
109                                                     const TensorInfo& output,
110                                                     Optional<std::string&> reasonIfUnsupported) const
111 {
112     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
113 }
114
115 bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& input,
116                                                 const TensorInfo& output,
117                                                 const Convolution2dDescriptor& descriptor,
118                                                 const TensorInfo& weights,
119                                                 const Optional<TensorInfo>& biases,
120                                                 Optional<std::string&> reasonIfUnsupported) const
121 {
122     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
123 }
124
125 bool LayerSupportBase::IsDebugSupported(const TensorInfo& input,
126                                         const TensorInfo& output,
127                                         Optional<std::string&> reasonIfUnsupported) const
128 {
129     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
130 }
131
132 bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& input,
133                                                        const TensorInfo& output,
134                                                        const DepthwiseConvolution2dDescriptor& descriptor,
135                                                        const TensorInfo& weights,
136                                                        const Optional<TensorInfo>& biases,
137                                                        Optional<std::string&> reasonIfUnsupported) const
138 {
139     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
140 }
141
142 bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& input,
143                                              const TensorInfo& output,
144                                              Optional<std::string&> reasonIfUnsupported) const
145 {
146     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
147 }
148
149 bool LayerSupportBase::IsDetectionPostProcessSupported(const armnn::TensorInfo& input0,
150                                                        const armnn::TensorInfo& input1,
151                                                        const armnn::DetectionPostProcessDescriptor& descriptor,
152                                                        armnn::Optional<std::string&> reasonIfUnsupported) const
153 {
154     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
155 }
156
157 bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
158                                                               const TensorInfo& output,
159                                                               const DepthwiseConvolution2dDescriptor& descriptor,
160                                                               const TensorInfo& weights,
161                                                               const Optional<TensorInfo>& biases,
162                                                               Optional<std::string&> reasonIfUnsupported) const
163 {
164     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
165 }
166
167 bool LayerSupportBase::IsDivisionSupported(const TensorInfo& input0,
168                                            const TensorInfo& input1,
169                                            const TensorInfo& output,
170                                            Optional<std::string&> reasonIfUnsupported) const
171 {
172     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
173 }
174
175 bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& input0,
176                                         const armnn::TensorInfo& input1,
177                                         const armnn::TensorInfo& output,
178                                         armnn::Optional<std::string &> reasonIfUnsupported) const
179 {
180     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
181 }
182
183 bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo& input,
184                                                    const FakeQuantizationDescriptor& descriptor,
185                                                    Optional<std::string&> reasonIfUnsupported) const
186 {
187     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
188 }
189
190 bool LayerSupportBase::IsFloorSupported(const TensorInfo& input,
191                                         const TensorInfo& output,
192                                         Optional<std::string&> reasonIfUnsupported) const
193 {
194     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
195 }
196
197 bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& input,
198                                                  const TensorInfo& output,
199                                                  const TensorInfo& weights,
200                                                  const TensorInfo& biases,
201                                                  const FullyConnectedDescriptor& descriptor,
202                                                  Optional<std::string&> reasonIfUnsupported) const
203 {
204     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
205 }
206
207 bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& input0,
208                                          const armnn::TensorInfo& input1,
209                                          const armnn::TensorInfo& output,
210                                          armnn::Optional<std::string&> reasonIfUnsupported) const
211 {
212     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
213 }
214
215 bool LayerSupportBase::IsGreaterSupported(const TensorInfo& input0,
216                                           const TensorInfo& input1,
217                                           const TensorInfo& output,
218                                           Optional<std::string&> reasonIfUnsupported) const
219 {
220     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
221 }
222
223 bool LayerSupportBase::IsInputSupported(const TensorInfo& input,
224                                         Optional<std::string&> reasonIfUnsupported) const
225 {
226     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
227 }
228
229 bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo& input,
230                                                   const TensorInfo& output,
231                                                   const L2NormalizationDescriptor& descriptor,
232                                                   Optional<std::string&> reasonIfUnsupported) const
233 {
234     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
235 }
236
237 bool LayerSupportBase::IsLstmSupported(const TensorInfo& input,
238                                        const TensorInfo& outputStateIn,
239                                        const TensorInfo& cellStateIn,
240                                        const TensorInfo& scratchBuffer,
241                                        const TensorInfo& outputStateOut,
242                                        const TensorInfo& cellStateOut,
243                                        const TensorInfo& output,
244                                        const LstmDescriptor& descriptor,
245                                        const LstmInputParamsInfo& paramsInfo,
246                                        Optional<std::string&> reasonIfUnsupported) const
247 {
248     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
249 }
250
251 bool LayerSupportBase::IsMaximumSupported(const TensorInfo& input0,
252                                           const TensorInfo& input1,
253                                           const TensorInfo& output,
254                                           Optional<std::string&> reasonIfUnsupported) const
255 {
256     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
257 }
258
259 bool LayerSupportBase::IsMeanSupported(const TensorInfo& input,
260                                        const TensorInfo& output,
261                                        const MeanDescriptor& descriptor,
262                                        Optional<std::string&> reasonIfUnsupported) const
263 {
264     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
265 }
266
267 bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo& input,
268                                           const armnn::TensorInfo& output,
269                                           armnn::Optional<std::string &> reasonIfUnsupported) const
270 {
271     boost::ignore_unused(input);
272     boost::ignore_unused(output);
273     return true;
274 }
275
276 bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo& input,
277                                             const armnn::TensorInfo& output,
278                                             armnn::Optional<std::string &> reasonIfUnsupported) const
279 {
280     boost::ignore_unused(input);
281     boost::ignore_unused(output);
282     return true;
283 }
284
285 bool LayerSupportBase::IsMergeSupported(const TensorInfo& input0,
286                                         const TensorInfo& input1,
287                                         const TensorInfo& output,
288                                         Optional<std::string&> reasonIfUnsupported) const
289 {
290     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
291 }
292
293 bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
294                                          const TensorInfo& output,
295                                          const OriginsDescriptor& descriptor,
296                                          Optional<std::string&> reasonIfUnsupported) const
297 {
298     return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
299 }
300
301 bool LayerSupportBase::IsMinimumSupported(const TensorInfo& input0,
302                                           const TensorInfo& input1,
303                                           const TensorInfo& output,
304                                           Optional<std::string&> reasonIfUnsupported) const
305 {
306     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
307 }
308
309 bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo& input0,
310                                                  const TensorInfo& input1,
311                                                  const TensorInfo& output,
312                                                  Optional<std::string&> reasonIfUnsupported) const
313 {
314     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
315 }
316
317 bool LayerSupportBase::IsNormalizationSupported(const TensorInfo& input,
318                                                 const TensorInfo& output,
319                                                 const NormalizationDescriptor& descriptor,
320                                                 Optional<std::string&> reasonIfUnsupported) const
321 {
322     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
323 }
324
325 bool LayerSupportBase::IsOutputSupported(const TensorInfo& output,
326                                          Optional<std::string&> reasonIfUnsupported) const
327 {
328     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
329 }
330
331 bool LayerSupportBase::IsPadSupported(const TensorInfo& input,
332                                       const TensorInfo& output,
333                                       const PadDescriptor& descriptor,
334                                       Optional<std::string&> reasonIfUnsupported) const
335 {
336     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
337 }
338
339 bool LayerSupportBase::IsPermuteSupported(const TensorInfo& input,
340                                           const TensorInfo& output,
341                                           const PermuteDescriptor& descriptor,
342                                           Optional<std::string&> reasonIfUnsupported) const
343 {
344     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
345 }
346
347 bool LayerSupportBase::IsPooling2dSupported(const TensorInfo& input,
348                                             const TensorInfo& output,
349                                             const Pooling2dDescriptor& descriptor,
350                                             Optional<std::string&> reasonIfUnsupported) const
351 {
352     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
353 }
354
355 bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& input,
356                                               const PreCompiledDescriptor& descriptor,
357                                               Optional<std::string&> reasonIfUnsupported) const
358 {
359     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
360 }
361
362 bool LayerSupportBase::IsPreluSupported(const TensorInfo& input,
363                                         const TensorInfo& alpha,
364                                         const TensorInfo& output,
365                                         Optional<std::string &> reasonIfUnsupported) const
366 {
367     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
368 }
369
370 bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& input,
371                                            const armnn::TensorInfo& output,
372                                            armnn::Optional<std::string&> reasonIfUnsupported) const
373 {
374     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
375 }
376
377 bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& input,
378                                                 const TensorInfo& previousCellStateIn,
379                                                 const TensorInfo& previousOutputIn,
380                                                 const TensorInfo& cellStateOut,
381                                                 const TensorInfo& output,
382                                                 const QuantizedLstmInputParamsInfo& paramsInfo,
383                                                 Optional<std::string&> reasonIfUnsupported) const
384 {
385     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
386 }
387
388 bool LayerSupportBase::IsReshapeSupported(const TensorInfo& input,
389                                           const ReshapeDescriptor& descriptor,
390                                           Optional<std::string&> reasonIfUnsupported) const
391 {
392     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
393 }
394
395 bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& input,
396                                                  const TensorInfo& output,
397                                                  Optional<std::string&> reasonIfUnsupported) const
398 {
399     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
400 }
401
402 bool LayerSupportBase::IsResizeSupported(const TensorInfo& input,
403                                          const TensorInfo& output,
404                                          const ResizeDescriptor& descriptor,
405                                          Optional<std::string&> reasonIfUnsupported) const
406 {
407     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
408 }
409
410 bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &input,
411                                         const TensorInfo &output,
412                                         Optional<std::string &> reasonIfUnsupported) const
413 {
414     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
415 }
416
417 bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& input,
418                                           const TensorInfo& output,
419                                           const SoftmaxDescriptor& descriptor,
420                                           Optional<std::string&> reasonIfUnsupported) const
421 {
422     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
423 }
424
425 bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo& input,
426                                                  const TensorInfo& output,
427                                                  const SpaceToBatchNdDescriptor& descriptor,
428                                                  Optional<std::string&> reasonIfUnsupported) const
429 {
430     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
431 }
432
433 bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& input,
434                                                const TensorInfo& output,
435                                                const SpaceToDepthDescriptor& descriptor,
436                                                Optional<std::string&> reasonIfUnsupported) const
437 {
438     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
439 }
440
441 bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
442                                            const ViewsDescriptor& descriptor,
443                                            Optional<std::string&> reasonIfUnsupported) const
444 {
445     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
446 }
447
448 bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
449                                            const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
450                                            const ViewsDescriptor& descriptor,
451                                            Optional<std::string&> reasonIfUnsupported) const
452 {
453     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
454 }
455
456 bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
457                                         const TensorInfo& output,
458                                         const StackDescriptor& descriptor,
459                                         Optional<std::string&> reasonIfUnsupported) const
460 {
461     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
462 }
463
464 bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
465                                                const TensorInfo& output,
466                                                const StridedSliceDescriptor& descriptor,
467                                                Optional<std::string&> reasonIfUnsupported) const
468 {
469     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
470 }
471
472 bool LayerSupportBase::IsSubtractionSupported(const TensorInfo& input0,
473                                               const TensorInfo& input1,
474                                               const TensorInfo& output,
475                                               Optional<std::string&> reasonIfUnsupported) const
476 {
477     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
478 }
479
480 bool LayerSupportBase::IsSwitchSupported(const TensorInfo& input0,
481                                          const TensorInfo& input1,
482                                          const TensorInfo& output0,
483                                          const TensorInfo& output1,
484                                          Optional<std::string&> reasonIfUnsupported) const
485 {
486     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
487 }
488
489 bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& input,
490                                                          const TensorInfo& output,
491                                                          const TransposeConvolution2dDescriptor& descriptor,
492                                                          const TensorInfo& weights,
493                                                          const Optional<TensorInfo>& biases,
494                                                          Optional<std::string&> reasonIfUnsupported) const
495 {
496     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
497 }
498
499 } // namespace armnn