IVGCVSW-2043 - Merger using ACL for innermost concat axis
[platform/upstream/armnn.git] / src / armnn / LayerSupport.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
#include <armnn/ArmNN.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/Optional.hpp>

#include <backendsCommon/BackendRegistry.hpp>
#include <backendsCommon/IBackendInternal.hpp>

#include <boost/assert.hpp>

#include <algorithm>
#include <cstring>
#include <sstream>
#include <unordered_map>
18
19 namespace armnn
20 {
21
22 namespace
23 {
/// Copies fullString into truncatedString, keeping at most maxLength
/// characters and always terminating the result with '\0'.
/// Does nothing when truncatedString is null.
/// NOTE(review): the terminator is written at index copyLength, which can be
/// maxLength itself — callers presumably provide a buffer of at least
/// maxLength + 1 bytes; confirm against the public API contract.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if (truncatedString == nullptr)
    {
        return; // No destination buffer supplied — nothing to do.
    }

    const size_t copyLength = std::min(maxLength, strlen(fullString));
    std::strncpy(truncatedString, fullString, copyLength);
    // strncpy does not null-terminate when it truncates, so do it explicitly.
    truncatedString[copyLength] = '\0';
}
35
36 }
37
// Helper macro to avoid code duplication.
// Looks up the backend registered under backendId and forwards the support
// query to that backend's ILayerSupport::func implementation. The full reason
// string is truncated into the caller's reasonIfUnsupported buffer (at most
// reasonIfUnsupportedMaxLength characters) on every path, including the
// "backend not registered" path, which previously never reported its reason.
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported = false; \
    try { \
        auto const& backendRegistry = BackendRegistryInstance(); \
        if (!backendRegistry.IsBackendRegistered(backendId)) \
        { \
            std::stringstream ss; \
            ss << __func__ << " is not supported on " << backendId << " because this backend is not registered."; \
            reasonIfUnsupportedFull = ss.str(); \
            isSupported = false; \
        } \
        else \
        { \
            auto factoryFunc = backendRegistry.GetFactory(backendId); \
            auto backendObject = factoryFunc(); \
            auto layerSupportObject = backendObject->GetLayerSupport(); \
            isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
        } \
        /* Copy on both branches so the "not registered" reason reaches the caller too. */ \
        CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
    } catch (const InvalidArgumentException& e) { \
        /* Catch by const reference to avoid slicing; re-throw with more context information. */ \
        throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    } \
    return isSupported;
65
66 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
67 {
68     return input0.GetDataType() == input1.GetDataType();
69 }
70
/// Queries whether the given backend supports an Activation layer with these
/// tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported (at most reasonIfUnsupportedMaxLength characters).
bool IsActivationSupported(const BackendId& backend,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const ActivationDescriptor& descriptor,
                           char* reasonIfUnsupported,
                           size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
}
80
81 bool IsAdditionSupported(const BackendId& backend,
82                          const TensorInfo& input0,
83                          const TensorInfo& input1,
84                          const TensorInfo& output,
85                          char* reasonIfUnsupported,
86                          size_t reasonIfUnsupportedMaxLength)
87 {
88     if(!CheckTensorDataTypesEqual(input0, input1))
89     {
90         return false;
91     }
92
93     FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
94 }
95
/// Queries whether the given backend supports a BatchNormalization layer with
/// these tensors (mean/var/beta/gamma statistics) and descriptor; any failure
/// reason is truncated into reasonIfUnsupported.
bool IsBatchNormalizationSupported(const BackendId& backend,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& mean,
                                   const TensorInfo& var,
                                   const TensorInfo& beta,
                                   const TensorInfo& gamma,
                                   const BatchNormalizationDescriptor& descriptor,
                                   char* reasonIfUnsupported,
                                   size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend,
                               IsBatchNormalizationSupported,
                               input,
                               output,
                               mean,
                               var,
                               beta,
                               gamma,
                               descriptor);
}
117
/// Queries whether the given backend supports a BatchToSpaceNd layer with
/// these tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsBatchToSpaceNdSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const BatchToSpaceNdDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend,
                               IsBatchToSpaceNdSupported,
                               input,
                               output,
                               descriptor);
}
131
/// Queries whether the given backend supports a Constant layer producing the
/// given output tensor; any failure reason is truncated into reasonIfUnsupported.
bool IsConstantSupported(const BackendId& backend,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
}
139
/// Queries whether the given backend supports an Fp16-to-Fp32 conversion layer
/// for these tensors; any failure reason is truncated into reasonIfUnsupported.
bool IsConvertFp16ToFp32Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
}
148
/// Queries whether the given backend supports an Fp32-to-Fp16 conversion layer
/// for these tensors; any failure reason is truncated into reasonIfUnsupported.
bool IsConvertFp32ToFp16Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
}
157
/// Queries whether the given backend supports a Convolution2d layer with these
/// tensors, weights and optional biases; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsConvolution2dSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const Convolution2dDescriptor& descriptor,
                              const TensorInfo& weights,
                              const Optional<TensorInfo>& biases,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
}
169
/// Queries whether the given backend supports an element-wise Division layer
/// for these tensors; any failure reason is truncated into reasonIfUnsupported.
bool IsDivisionSupported(const BackendId& backend,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
}
179
/// Queries whether the given backend supports an element-wise Subtraction
/// layer for these tensors; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsSubtractionSupported(const BackendId& backend,
                            const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            char* reasonIfUnsupported,
                            size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
}
189
/// Queries whether the given backend supports a DepthwiseConvolution2d layer
/// with these tensors, weights and optional biases; any failure reason is
/// truncated into reasonIfUnsupported.
bool IsDepthwiseConvolutionSupported(const BackendId& backend,
                                     const TensorInfo& input,
                                     const TensorInfo& output,
                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                     const TensorInfo& weights,
                                     const Optional<TensorInfo>& biases,
                                     char* reasonIfUnsupported,
                                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
}
201
/// Queries whether the given backend supports an Input layer with the given
/// tensor; any failure reason is truncated into reasonIfUnsupported.
bool IsInputSupported(const BackendId& backend,
                      const TensorInfo& input,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
}
209
/// Queries whether the given backend supports a FullyConnected layer with
/// these tensors, weights and biases; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsFullyConnectedSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const TensorInfo& weights,
                               const TensorInfo& biases,
                               const FullyConnectedDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
221
/// Queries whether the given backend supports an L2Normalization layer with
/// these tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsL2NormalizationSupported(const BackendId& backend,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                const L2NormalizationDescriptor& descriptor,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
}
231
/// Queries whether the given backend supports an LSTM layer with the given
/// state/weight/bias tensors and descriptor; any failure reason is truncated
/// into reasonIfUnsupported.
/// NOTE(review): the pointer parameters (inputToInputWeights through
/// cellToOutputWeights) appear to be optional tensors — presumably null when
/// the corresponding CIFG/peephole/projection feature is disabled; confirm
/// against the ILayerSupport contract.
bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
                     const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                     const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                     const TensorInfo& output, const LstmDescriptor& descriptor,
                     const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
                     const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
                     const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
                     const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
                     const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
                     const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
                     const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
                     const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
                     const TensorInfo* cellToOutputWeights, char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)

{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
                               scratchBuffer, outputStateOut, cellStateOut,
                               output, descriptor, inputToForgetWeights, inputToCellWeights,
                               inputToOutputWeights, recurrentToForgetWeights,
                               recurrentToCellWeights, recurrentToOutputWeights,
                               forgetGateBias, cellBias, outputGateBias,
                               inputToInputWeights, recurrentToInputWeights,
                               cellToInputWeights, inputGateBias, projectionWeights,
                               projectionBias, cellToForgetWeights, cellToOutputWeights);
}
/// Queries whether the given backend supports a Merger (concatenation) layer
/// combining the given input tensors into output per the origins descriptor;
/// any failure reason is truncated into reasonIfUnsupported.
/// Precondition: inputs must be non-empty (asserted in debug builds).
bool IsMergerSupported(const BackendId& backend,
                       std::vector<const TensorInfo*> inputs,
                       const TensorInfo& output,
                       const OriginsDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    BOOST_ASSERT(inputs.size() > 0);
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
}
268
/// Queries whether the given backend supports an element-wise Multiplication
/// layer for these tensors; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsMultiplicationSupported(const BackendId& backend,
                               const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
}
278
/// Queries whether the given backend supports a Normalization layer with these
/// tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsNormalizationSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const NormalizationDescriptor& descriptor,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
}
288
/// Queries whether the given backend supports an Output layer with the given
/// tensor; any failure reason is truncated into reasonIfUnsupported.
bool IsOutputSupported(const BackendId& backend,
                       const TensorInfo& output,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
}
296
/// Queries whether the given backend supports a Permute layer with these
/// tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsPermuteSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const PermuteDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
}
306
/// Queries whether the given backend supports a Pooling2d layer with these
/// tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsPooling2dSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const TensorInfo& output,
                          const Pooling2dDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
}
316
/// Queries whether the given backend supports a ResizeBilinear layer for the
/// given input tensor (no output info is consulted at this level); any failure
/// reason is truncated into reasonIfUnsupported.
bool IsResizeBilinearSupported(const BackendId& backend,
                               const TensorInfo& input,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input);
}
324
/// Queries whether the given backend supports a Softmax layer with these
/// tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsSoftmaxSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const SoftmaxDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
}
334
/// Queries whether the given backend supports a SpaceToBatchNd layer with
/// these tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsSpaceToBatchNdSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const SpaceToBatchNdDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
}
344
/// Queries whether the given backend supports a Splitter layer splitting the
/// input per the views descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsSplitterSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const ViewsDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
}
353
/// Queries whether the given backend supports a FakeQuantization layer with
/// the given tensor and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsFakeQuantizationSupported(const BackendId& backend,
                                 const TensorInfo& input,
                                 const FakeQuantizationDescriptor& descriptor,
                                 char* reasonIfUnsupported,
                                 size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
}
362
/// Queries whether the given backend supports a Reshape layer for the given
/// input tensor (no output info is consulted at this level); any failure
/// reason is truncated into reasonIfUnsupported.
bool IsReshapeSupported(const BackendId& backend,
                        const TensorInfo& input,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input);
}
370
371 bool IsFloorSupported(const BackendId& backend,
372                       const TensorInfo& input,
373                       const TensorInfo& output,
374                       char* reasonIfUnsupported,
375                       size_t reasonIfUnsupportedMaxLength)
376 {
377     // By definition (that is, regardless of compute device), shapes and data type must match.
378     if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
379     {
380         return false;
381     }
382
383     FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
384 }
385
/// Queries whether the given backend supports a Mean (reduction) layer with
/// these tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsMeanSupported(const BackendId& backend,
                     const TensorInfo& input,
                     const TensorInfo& output,
                     const MeanDescriptor& descriptor,
                     char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
}
395
/// Queries whether the given backend supports a Pad layer with these tensors
/// and descriptor; any failure reason is truncated into reasonIfUnsupported.
bool IsPadSupported(const BackendId& backend,
                    const TensorInfo& input,
                    const TensorInfo& output,
                    const PadDescriptor& descriptor,
                    char* reasonIfUnsupported,
                    size_t reasonIfUnsupportedMaxLength)
{

    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
}
406
/// Queries whether the given backend supports a StridedSlice layer with these
/// tensors and descriptor; any failure reason is truncated into
/// reasonIfUnsupported.
bool IsStridedSliceSupported(const BackendId& backend,
                             const TensorInfo& input,
                             const TensorInfo& output,
                             const StridedSliceDescriptor& descriptor,
                             char* reasonIfUnsupported,
                             size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
}
416
417 }