IVGCVSW-1946: Remove armnn/src from the include paths
[platform/upstream/armnn.git] / src / armnn / LayerSupport.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/LayerSupport.hpp>
#include <armnn/Optional.hpp>

#include <backendsCommon/LayerSupportRegistry.hpp>

#include <boost/assert.hpp>

#include <cstring>
#include <algorithm>
#include <unordered_map>

namespace armnn
{

namespace
{
/// Helper function to copy a full string to a truncated version.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if(truncatedString != nullptr)
    {
        size_t copyLength = std::min(maxLength, strlen(fullString));
        std::strncpy(truncatedString, fullString, copyLength);
        // Ensure null-terminated string.
        truncatedString[copyLength] = '\0';
    }
}
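
// Illustrative usage only (the buffer name and size below are assumptions, not taken from this file):
//     char reason[1024];
//     CopyErrorMessage(reason, "some longer failure description", sizeof(reason) - 1);
// The copy writes at most maxLength characters plus a terminating null, so the destination
// buffer is assumed to provide room for maxLength + 1 bytes.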

}

// Helper macro to avoid code duplication.
// Looks up the layer support object registered for backendId and forwards the call to its
// func member function, capturing the failure reason (if any) in reasonIfUnsupported.
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported; \
    try { \
        auto factoryFunc = LayerSupportRegistryInstance().GetFactory(backendId); \
        auto layerSupportObject = factoryFunc(EmptyInitializer()); \
        isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
        CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
    } catch (const InvalidArgumentException& e) { \
        /* Re-throw with more context information. */ \
        throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    } \
    return isSupported;
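
// For illustration only, a sketch of what the macro expands to for a single-argument check
// such as FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input):
//     std::string reasonIfUnsupportedFull;
//     bool isSupported;
//     try {
//         auto factoryFunc = LayerSupportRegistryInstance().GetFactory(backend);
//         auto layerSupportObject = factoryFunc(EmptyInitializer());
//         isSupported = layerSupportObject->IsInputSupported(input,
//                                                            Optional<std::string&>(reasonIfUnsupportedFull));
//         CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength);
//     } catch (const InvalidArgumentException& e) {
//         throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION());
//     }
//     return isSupported;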

bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
{
    return input0.GetDataType() == input1.GetDataType();
}

bool IsActivationSupported(const BackendId& backend,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const ActivationDescriptor& descriptor,
                           char* reasonIfUnsupported,
                           size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
}
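
// Typical call pattern for these free functions, from client code outside this file
// (illustrative only; the tensor shape, data type and buffer size are assumptions):
//     armnn::TensorInfo info({1, 16}, armnn::DataType::Float32);
//     armnn::ActivationDescriptor desc;
//     char reason[1024];
//     bool supported = armnn::IsActivationSupported(armnn::Compute::CpuRef, info, info, desc,
//                                                   reason, sizeof(reason) - 1);
// On failure, reason holds a (possibly truncated) explanation of why the layer is unsupported.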

bool IsAdditionSupported(const BackendId& backend,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    if(!CheckTensorDataTypesEqual(input0, input1))
    {
        return false;
    }

    FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
}

bool IsBatchNormalizationSupported(const BackendId& backend,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& mean,
                                   const TensorInfo& var,
                                   const TensorInfo& beta,
                                   const TensorInfo& gamma,
                                   const BatchNormalizationDescriptor& descriptor,
                                   char* reasonIfUnsupported,
                                   size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend,
                               IsBatchNormalizationSupported,
                               input,
                               output,
                               mean,
                               var,
                               beta,
                               gamma,
                               descriptor);
}

bool IsConstantSupported(const BackendId& backend,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
}

bool IsConvertFp16ToFp32Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
}

bool IsConvertFp32ToFp16Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
}

bool IsConvolution2dSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const Convolution2dDescriptor& descriptor,
                              const TensorInfo& weights,
                              const Optional<TensorInfo>& biases,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
}

bool IsDivisionSupported(const BackendId& backend,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
}

bool IsSubtractionSupported(const BackendId& backend,
                            const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            char* reasonIfUnsupported,
                            size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
}

bool IsDepthwiseConvolutionSupported(const BackendId& backend,
                                     const TensorInfo& input,
                                     const TensorInfo& output,
                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                     const TensorInfo& weights,
                                     const Optional<TensorInfo>& biases,
                                     char* reasonIfUnsupported,
                                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
}

bool IsInputSupported(const BackendId& backend,
                      const TensorInfo& input,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
}

bool IsFullyConnectedSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const TensorInfo& weights,
                               const TensorInfo& biases,
                               const FullyConnectedDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}

bool IsL2NormalizationSupported(const BackendId& backend,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                const L2NormalizationDescriptor& descriptor,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
}

bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
                     const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                     const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                     const TensorInfo& output, const LstmDescriptor& descriptor,
                     const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
                     const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
                     const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
                     const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
                     const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
                     const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
                     const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
                     const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
                     const TensorInfo* cellToOutputWeights, char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
                               scratchBuffer, outputStateOut, cellStateOut,
                               output, descriptor, inputToForgetWeights, inputToCellWeights,
                               inputToOutputWeights, recurrentToForgetWeights,
                               recurrentToCellWeights, recurrentToOutputWeights,
                               forgetGateBias, cellBias, outputGateBias,
                               inputToInputWeights, recurrentToInputWeights,
                               cellToInputWeights, inputGateBias, projectionWeights,
                               projectionBias, cellToForgetWeights, cellToOutputWeights);
}

bool IsMergerSupported(const BackendId& backend,
                       std::vector<const TensorInfo*> inputs,
                       const OriginsDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    BOOST_ASSERT(inputs.size() > 0);
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, descriptor);
}

bool IsMultiplicationSupported(const BackendId& backend,
                               const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
}

bool IsNormalizationSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const NormalizationDescriptor& descriptor,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
}

bool IsOutputSupported(const BackendId& backend,
                       const TensorInfo& output,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
}

bool IsPermuteSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const PermuteDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
}

bool IsPooling2dSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const TensorInfo& output,
                          const Pooling2dDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
}

bool IsResizeBilinearSupported(const BackendId& backend,
                               const TensorInfo& input,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input);
}

bool IsSoftmaxSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const SoftmaxDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
}

bool IsSpaceToBatchNdSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const SpaceToBatchNdDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
}

bool IsSplitterSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const ViewsDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
}

bool IsFakeQuantizationSupported(const BackendId& backend,
                                 const TensorInfo& input,
                                 const FakeQuantizationDescriptor& descriptor,
                                 char* reasonIfUnsupported,
                                 size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
}

bool IsReshapeSupported(const BackendId& backend,
                        const TensorInfo& input,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input);
}

bool IsFloorSupported(const BackendId& backend,
                      const TensorInfo& input,
                      const TensorInfo& output,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    // By definition (that is, regardless of compute device), shapes and data type must match.
    if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
    {
        return false;
    }

    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
}

bool IsMeanSupported(const BackendId& backend,
                     const TensorInfo& input,
                     const TensorInfo& output,
                     const MeanDescriptor& descriptor,
                     char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
}

bool IsPadSupported(const BackendId& backend,
                    const TensorInfo& input,
                    const TensorInfo& output,
                    const PadDescriptor& descriptor,
                    char* reasonIfUnsupported,
                    size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
}

}