Release 18.03
[platform/upstream/armnn.git] / src / armnn / backends / NeonLayerSupport.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // See LICENSE file in the project root for full license information.
4 //
5
6 #include "NeonLayerSupport.hpp"
7
8 #include "LayerSupportCommon.hpp"
9 #include "InternalTypes.hpp"
10
11 #include <armnn/Descriptors.hpp>
12 #include <armnn/Types.hpp>
13 #include <armnn/Tensor.hpp>
14
15 #include <boost/core/ignore_unused.hpp>
16
17 #ifdef ARMCOMPUTENEON_ENABLED
18 #include "NeonWorkloads/NeonPooling2dBaseWorkload.hpp"
19 #include "NeonWorkloads/NeonPermuteWorkload.hpp"
20 #endif
21
22 using namespace boost;
23
24 namespace armnn
25 {
26 bool IsNeonActivationUint8Supported(std::string* reasonIfUnsupported, const ActivationDescriptor& parameters)
27 {
28     if (parameters.m_Function != ActivationFunction::BoundedReLu)
29     {
30         if (reasonIfUnsupported)
31         {
32             *reasonIfUnsupported = "Unsupported activation function, only BoundedReLu is supported)";
33         }
34
35         return false;
36     }
37
38     return true;
39 }
40
41 bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc)
42 {
43     // See arm_compute::NEDirectConvolutionLayer documentation for the supported cases,
44     // and complement with NEDirectConvolutionLayerKernel::configure() implementation
45
46     // Only 1x1 is using direct convolution. Performance results and details are in:
47     //    https://jira.arm.com/browse/IVGCVSW-1003
48     // Measurements were taken as of clframework: f105ab972135bcd21304883eff040d7e587099bc
49
50     const bool dataTypeSupported = (weightInfo.GetDataType() == armnn::DataType::Float32);
51
52     // Strides: 1|2|3
53     const bool strideSupported = (desc.m_StrideX == 1 || desc.m_StrideX == 2 || desc.m_StrideX == 3) &&
54                                  (desc.m_StrideY == 1 || desc.m_StrideY == 2 || desc.m_StrideY == 3);
55
56     auto paddingLargerThan = [](const Convolution2dDescriptor& desc, unsigned int value)
57     {
58         return desc.m_PadLeft > value || desc.m_PadRight > value || desc.m_PadTop > value || desc.m_PadBottom > value;
59     };
60
61     // Supported sizes and padding
62     const bool sizeAndPaddingSupported =
63         // Pad > 0 not supported for 1x1 weights
64         (weightInfo.GetShape()[2] == 1 && weightInfo.GetShape()[3] == 1 && !paddingLargerThan(desc, 0u));
65
66     const bool preferDirectConvolution = dataTypeSupported &&
67                                          strideSupported &&
68                                          sizeAndPaddingSupported &&
69                                          // NEDirectConvolutionLayerKernel doesn't support NULL bias
70                                          desc.m_BiasEnabled;
71     return preferDirectConvolution;
72 }
73
74 bool IsNeonMultiplicationParamsSupported(std::string* reasonIfUnsupported,
75                                          const TensorInfo& info0,
76                                          const TensorInfo& info1)
77 {
78     if (info0.GetShape() == info1.GetShape())
79     {
80         return true;
81     }
82
83     if (reasonIfUnsupported)
84     {
85         *reasonIfUnsupported = "Multiplication on Neon does not support implicit broadcast.";
86     }
87     return false;
88 }
89
90 bool IsNeonNormalizationDescParamsSupported(std::string* reasonIfUnsupported, const NormalizationDescriptor& parameters)
91 {
92     if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
93     {
94         if (reasonIfUnsupported)
95         {
96             *reasonIfUnsupported = "Unsupported normalisation method type, only LocalBrightness is supported";
97         }
98         return false;
99     }
100     if (parameters.m_NormSize % 2 == 0)
101     {
102         if (reasonIfUnsupported)
103         {
104             *reasonIfUnsupported = "Normalization size must be an odd number.";
105         }
106         return false;
107     }
108
109     return true;
110 }
111
// Reports whether the Neon (Arm Compute) backend was compiled into this library.
// When it was not, returns false and (if reasonIfUnsupported is non-null)
// explains why.
bool IsNeonBackendSupported(std::string* reasonIfUnsupported)
{
#if !ARMCOMPUTENEON_ENABLED
    if (reasonIfUnsupported != nullptr)
    {
        *reasonIfUnsupported = "The armnn library has been built without NEON support";
    }
    return false;
#else
    return true;
#endif
}
124
125 template<typename Float32Func, typename Uint8Func, typename ... Params>
126 bool IsSupportedForDataTypeNeon(std::string* reasonIfUnsupported,
127                                 DataType dataType,
128                                 Float32Func floatFuncPtr,
129                                 Uint8Func uint8FuncPtr,
130                                 Params&&... params)
131 {
132     return IsNeonBackendSupported(reasonIfUnsupported) &&
133         IsSupportedForDataTypeGeneric(reasonIfUnsupported,
134                                          dataType,
135                                          floatFuncPtr,
136                                          uint8FuncPtr,
137                                          std::forward<Params>(params)...);
138 }
139
#if ARMCOMPUTENEON_ENABLED
// Runs an Arm Compute Library validate() function and converts its Status into
// the bool + optional-reason convention used throughout this file.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, std::string* reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        // Propagate ACL's own error description to the caller.
        *reasonIfUnsupported = aclStatus.error_description();
    }
    return supported;
}

// Expands to a `return` of the given ACL validate function's verdict.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without NEON compiled in, every workload query reduces to "backend unavailable".
// NOTE: the macro arguments (and the caller's parameters) are intentionally unused here.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported);
#endif
159
160 bool IsActivationSupportedNeon(const TensorInfo& input,
161                                const ActivationDescriptor& descriptor,
162                                std::string* reasonIfUnsupported)
163 {
164     ignore_unused(descriptor);
165     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
166                                       input.GetDataType(),
167                                       &TrueFunc<const ActivationDescriptor&>,
168                                       &IsNeonActivationUint8Supported,
169                                       descriptor);
170 }
171
172 bool IsNeonDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported,
173                                                      const DepthwiseConvolution2dDescriptor& parameters,
174                                                      const TensorInfo& weights)
175 {
176     ignore_unused(weights);
177
178     if (parameters.m_StrideX < 1 || parameters.m_StrideX > 3)
179     {
180         if (reasonIfUnsupported)
181         {
182             *reasonIfUnsupported = "m_StrideX can only be 1, 2 or 3";
183         }
184         return false;
185     }
186
187     // weights.GetShape()[0] = channel multiplier
188     if (weights.GetShape()[0] != 1)
189     {
190         if (reasonIfUnsupported)
191         {
192             *reasonIfUnsupported = "Channel multiplier only supports the value 1 in the NEON backend";
193         }
194         return false;
195     }
196
197     if (parameters.m_PadLeft != parameters.m_PadRight || parameters.m_PadTop != parameters.m_PadBottom)
198     {
199         if (reasonIfUnsupported)
200         {
201             *reasonIfUnsupported = "Asymmetric padding for depthwise convolution currently not supported "
202                 "in Neon backend";
203         }
204         return false;
205     }
206
207     return true;
208 }
209
210 bool IsAdditionSupportedNeon(const TensorInfo& input0,
211                              const TensorInfo& input1,
212                              const TensorInfo& output,
213                              std::string* reasonIfUnsupported)
214 {
215     ignore_unused(input1);
216     ignore_unused(output);
217     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
218                                       input0.GetDataType(),
219                                       &TrueFunc<>,
220                                       &FalseFuncU8<>);
221 }
222
223 bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
224                                        const BatchNormalizationDescriptor& descriptor,
225                                        std::string* reasonIfUnsupported)
226 {
227     ignore_unused(descriptor);
228     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
229                                       input.GetDataType(),
230                                       &TrueFunc<>,
231                                       &FalseFuncU8<>);
232 }
233
234 bool IsConstantSupportedNeon(const TensorInfo& output,
235                              std::string* reasonIfUnsupported)
236 {
237     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
238                                       output.GetDataType(),
239                                       &TrueFunc<>,
240                                       &TrueFunc<>);
241 }
242
243 bool IsConvolution2dSupportedNeon(const TensorInfo& input,
244                                   const Convolution2dDescriptor& descriptor,
245                                   const TensorInfo& weights,
246                                   std::string* reasonIfUnsupported)
247 {
248     ignore_unused(descriptor);
249     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
250                                       input.GetDataType(),
251                                       &TrueFunc<>,
252                                       &TrueFunc<>);
253 }
254
255 bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
256                                          const DepthwiseConvolution2dDescriptor& descriptor,
257                                          const TensorInfo& weights,
258                                          std::string* reasonIfUnsupported)
259 {
260     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
261                                       input.GetDataType(),
262                                       &IsNeonDepthwiseConvolution2dDescParamsSupported,
263                                       &IsNeonDepthwiseConvolution2dDescParamsSupported,
264                                       descriptor,
265                                       weights);
266 }
267
268 bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
269                                    const FullyConnectedDescriptor& descriptor,
270                                    std::string* reasonIfUnsupported)
271 {
272     ignore_unused(descriptor);
273     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
274                                       input.GetDataType(),
275                                       &TrueFunc<>,
276                                       &FalseFuncU8<>);
277 }
278
279 bool IsInputSupportedNeon(const TensorInfo& input,
280                           std::string* reasonIfUnsupported)
281 {
282     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
283                                       input.GetDataType(),
284                                       &TrueFunc<>,
285                                       &TrueFunc<>);
286 }
287
288 bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
289                                     std::string* reasonIfUnsupported)
290 {
291     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
292                                       input.GetDataType(),
293                                       &TrueFunc<>,
294                                       &FalseFunc<>);
295 }
296
297 bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
298                            const OriginsDescriptor& descriptor,
299                            std::string* reasonIfUnsupported)
300 {
301     ignore_unused(descriptor);
302     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
303                                       inputs[0]->GetDataType(),
304                                       &TrueFunc<>,
305                                       &TrueFunc<>);
306 }
307
308 bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
309                                    const TensorInfo& input1,
310                                    std::string* reasonIfUnsupported)
311 {
312     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
313                                       input0.GetDataType(),
314                                       &IsNeonMultiplicationParamsSupported,
315                                       &FalseFuncU8<const TensorInfo&, const TensorInfo&>,
316                                       input0,
317                                       input1
318                             );
319 }
320
321 bool IsNormalizationSupportedNeon(const TensorInfo& input,
322                                   const TensorInfo& output,
323                                   const NormalizationDescriptor& descriptor,
324                                   std::string* reasonIfUnsupported)
325 {
326     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
327                                       input.GetDataType(),
328                                       &IsNeonNormalizationDescParamsSupported,
329                                       &FalseFuncU8<const NormalizationDescriptor&>,
330                                       descriptor);
331 }
332
333 bool IsOutputSupportedNeon(const TensorInfo& output,
334                            std::string* reasonIfUnsupported)
335 {
336     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
337                                       output.GetDataType(),
338                                       &TrueFunc<>,
339                                       &TrueFunc<>);
340 }
341
// Queries support for a permute layer on Neon. With NEON compiled in this
// forwards to NeonPermuteWorkloadValidate; otherwise it reports the backend
// as unavailable (see FORWARD_WORKLOAD_VALIDATE_FUNC, which expands to a return).
bool IsPermuteSupportedNeon(const TensorInfo& input,
                            const TensorInfo& output,
                            const PermuteDescriptor& descriptor,
                            std::string* reasonIfUnsupported)
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
349
// Queries support for a 2D pooling layer on Neon. With NEON compiled in this
// forwards to NeonPooling2dWorkloadValidate; otherwise it reports the backend
// as unavailable (see FORWARD_WORKLOAD_VALIDATE_FUNC, which expands to a return).
bool IsPooling2dSupportedNeon(const TensorInfo& input,
                              const TensorInfo& output,
                              const Pooling2dDescriptor& descriptor,
                              std::string* reasonIfUnsupported)
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
357
358 bool IsResizeBilinearSupportedNeon(const TensorInfo& input,
359                                    std::string* reasonIfUnsupported)
360 {
361     ignore_unused(input);
362     return false;
363 }
364
365 bool IsSoftmaxSupportedNeon(const TensorInfo& input,
366                             const SoftmaxDescriptor& descriptor,
367                             std::string* reasonIfUnsupported)
368 {
369     ignore_unused(descriptor);
370     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
371                                       input.GetDataType(),
372                                       &TrueFunc<>,
373                                       &TrueFunc<>);
374 }
375
376 bool IsSplitterSupportedNeon(const TensorInfo& input,
377                              const ViewsDescriptor& descriptor,
378                              std::string* reasonIfUnsupported)
379 {
380     ignore_unused(descriptor);
381     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
382                                       input.GetDataType(),
383                                       &TrueFunc<>,
384                                       &TrueFunc<>);
385 }
386
387 bool IsFakeQuantizationSupportedNeon(const TensorInfo& input,
388                                      const FakeQuantizationDescriptor& descriptor,
389                                      std::string* reasonIfUnsupported)
390 {
391     ignore_unused(input);
392     ignore_unused(descriptor);
393     return false;
394 }
395
396 bool IsReshapeSupportedNeon(const TensorInfo& input,
397                             std::string* reasonIfUnsupported)
398 {
399     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
400                                       input.GetDataType(),
401                                       &TrueFunc<>,
402                                       &TrueFunc<>);
403 }
404
405 bool IsFloorSupportedNeon(const TensorInfo& input,
406                           const TensorInfo& output,
407                           std::string* reasonIfUnsupported)
408 {
409     ignore_unused(output);
410     return IsSupportedForDataTypeNeon(reasonIfUnsupported,
411                                       input.GetDataType(),
412                                       &TrueFunc<>,
413                                       &FalseFuncU8<>);
414 }
415
416 }