Release 18.08
[platform/upstream/armnn.git] / src / armnn / LayerSupport.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // See LICENSE file in the project root for full license information.
4 //
5 #include "armnn/LayerSupport.hpp"
6
7 #include "backends/RefLayerSupport.hpp"
8 #include "backends/NeonLayerSupport.hpp"
9 #include "backends/ClLayerSupport.hpp"
10
11 #include <boost/assert.hpp>
12
13 #include <cstring>
14 #include <algorithm>
15
16 namespace armnn
17 {
18
19 /// Helper function to copy a full string to a truncated version.
/// Helper function to copy a full string to a truncated version.
/// @param truncatedString Destination buffer; must have room for at least maxLength + 1
///        characters (a terminating null is always written). No-op when nullptr.
/// @param fullString Source message; treated as an empty string when nullptr.
/// @param maxLength Maximum number of characters (excluding the null terminator) to copy.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if(truncatedString != nullptr)
    {
        // Guard against a null source: calling strlen(nullptr) is undefined behaviour.
        const char* source = (fullString != nullptr) ? fullString : "";
        size_t copyLength = std::min(maxLength, strlen(source));
        std::strncpy(truncatedString, source, copyLength);
        // Ensure null-terminated string.
        truncatedString[copyLength] = '\0';
    }
}
30
// Helper macro to avoid code duplication.
// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute.
// Expands to a statement sequence (not a single expression) that:
//   * calls the backend-specific variant (func##Ref / func##Neon / func##Cl) selected by
//     'compute', falling back to the CpuRef implementation for unrecognised values;
//   * copies the backend's full failure reason into the caller-supplied, length-limited
//     buffer via CopyErrorMessage — the names 'reasonIfUnsupported' and
//     'reasonIfUnsupportedMaxLength' must exist in the enclosing scope;
//   * returns the support flag, so this must be the final statement of the caller.
// NOTE: no comments may be added inside the macro itself — a '//' before a trailing
// backslash would swallow the continuation line during line splicing.
#define FORWARD_LAYER_SUPPORT_FUNC(compute, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported; \
    switch(compute) \
    { \
        case Compute::CpuRef: \
            isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
        case Compute::CpuAcc: \
            isSupported = func##Neon(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
        case Compute::GpuAcc: \
            isSupported = func##Cl(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
        default: \
            isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
    } \
    CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
    return isSupported;
53
54 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
55 {
56     return input0.GetDataType() == input1.GetDataType();
57 }
58
/// Queries whether an Activation layer with the given input/output tensors and descriptor
/// is supported on the requested compute backend. On failure, a truncated reason is written
/// into reasonIfUnsupported (up to reasonIfUnsupportedMaxLength characters).
bool IsActivationSupported(Compute compute,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const ActivationDescriptor& descriptor,
                           char* reasonIfUnsupported,
                           size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsActivationSupported, input, output, descriptor);
}
68
69 bool IsAdditionSupported(Compute compute,
70                          const TensorInfo& input0,
71                          const TensorInfo& input1,
72                          const TensorInfo& output,
73                          char* reasonIfUnsupported,
74                          size_t reasonIfUnsupportedMaxLength)
75 {
76     if(!CheckTensorDataTypesEqual(input0, input1))
77     {
78         return false;
79     }
80
81     FORWARD_LAYER_SUPPORT_FUNC(compute, IsAdditionSupported, input0, input1, output);
82 }
83
/// Queries whether a BatchNormalization layer (with its mean/variance/beta/gamma parameter
/// tensors and descriptor) is supported on the requested compute backend. On failure, a
/// truncated reason is written into reasonIfUnsupported.
bool IsBatchNormalizationSupported(Compute compute,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& mean,
                                   const TensorInfo& var,
                                   const TensorInfo& beta,
                                   const TensorInfo& gamma,
                                   const BatchNormalizationDescriptor& descriptor,
                                   char* reasonIfUnsupported,
                                   size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute,
                               IsBatchNormalizationSupported,
                               input,
                               output,
                               mean,
                               var,
                               beta,
                               gamma,
                               descriptor);
}
105
/// Queries whether a Constant layer producing the given output tensor is supported on the
/// requested compute backend. On failure, a truncated reason is written into reasonIfUnsupported.
bool IsConstantSupported(Compute compute,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConstantSupported, output);
}
113
/// Queries whether a ConvertFp16ToFp32 layer with the given tensors is supported on the
/// requested compute backend. On failure, a truncated reason is written into reasonIfUnsupported.
bool IsConvertFp16ToFp32Supported(Compute compute,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp16ToFp32Supported, input, output);
}
122
/// Queries whether a ConvertFp32ToFp16 layer with the given tensors is supported on the
/// requested compute backend. On failure, a truncated reason is written into reasonIfUnsupported.
bool IsConvertFp32ToFp16Supported(Compute compute,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp32ToFp16Supported, input, output);
}
131
/// Queries whether a Convolution2d layer (with the given weights/biases tensors and
/// descriptor) is supported on the requested compute backend. On failure, a truncated
/// reason is written into reasonIfUnsupported.
bool IsConvolution2dSupported(Compute compute,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const Convolution2dDescriptor& descriptor,
                              const TensorInfo& weights,
                              const TensorInfo& biases,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvolution2dSupported, input, output, descriptor, weights, biases);
}
143
/// Queries whether a DepthwiseConvolution2d layer (with the given weights/biases tensors
/// and descriptor) is supported on the requested compute backend. On failure, a truncated
/// reason is written into reasonIfUnsupported.
bool IsDepthwiseConvolutionSupported(Compute compute,
                                     const TensorInfo& input,
                                     const TensorInfo& output,
                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                     const TensorInfo& weights,
                                     const TensorInfo& biases,
                                     char* reasonIfUnsupported,
                                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
}
155
/// Queries whether an Input layer with the given tensor is supported on the requested
/// compute backend. On failure, a truncated reason is written into reasonIfUnsupported.
bool IsInputSupported(Compute compute,
                      const TensorInfo& input,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsInputSupported, input);
}
163
/// Queries whether a FullyConnected layer (with the given weights/biases tensors and
/// descriptor) is supported on the requested compute backend. On failure, a truncated
/// reason is written into reasonIfUnsupported.
bool IsFullyConnectedSupported(Compute compute,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const TensorInfo& weights,
                               const TensorInfo& biases,
                               const FullyConnectedDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
175
/// Queries whether an L2Normalization layer with the given tensors is supported on the
/// requested compute backend. On failure, a truncated reason is written into reasonIfUnsupported.
bool IsL2NormalizationSupported(Compute compute,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsL2NormalizationSupported, input, output);
}
184
/// Queries whether an LSTM layer with the given state/weight/bias tensors and descriptor
/// is supported on the requested compute backend. Reference parameters are mandatory
/// tensors; the pointer parameters are presumably optional tensors that may be null when
/// the corresponding LSTM feature (CIFG input gate, peephole, projection) is disabled —
/// confirm against the backend implementations. On failure, a truncated reason is written
/// into reasonIfUnsupported.
bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo& outputStateIn,
                     const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                     const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                     const TensorInfo& output, const LstmDescriptor& descriptor,
                     const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
                     const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
                     const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
                     const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
                     const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
                     const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
                     const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
                     const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
                     const TensorInfo* cellToOutputWeights, char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)

{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsLstmSupported, input, outputStateIn, cellStateIn,
                               scratchBuffer, outputStateOut, cellStateOut,
                               output, descriptor, inputToForgetWeights, inputToCellWeights,
                               inputToOutputWeights, recurrentToForgetWeights,
                               recurrentToCellWeights, recurrentToOutputWeights,
                               forgetGateBias, cellBias, outputGateBias,
                               inputToInputWeights, recurrentToInputWeights,
                               cellToInputWeights, inputGateBias, projectionWeights,
                               projectionBias, cellToForgetWeights, cellToOutputWeights);
}
/// Queries whether a Merger (concatenation) layer over the given input tensors is supported
/// on the requested compute backend. 'inputs' must be non-empty — this is only enforced by
/// BOOST_ASSERT, i.e. checked in debug builds. On failure, a truncated reason is written
/// into reasonIfUnsupported.
bool IsMergerSupported(Compute compute,
                       std::vector<const TensorInfo*> inputs,
                       const OriginsDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    BOOST_ASSERT(inputs.size() > 0);
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsMergerSupported, inputs, descriptor);
}
220
/// Queries whether a Multiplication layer with the given tensors is supported on the
/// requested compute backend. On failure, a truncated reason is written into reasonIfUnsupported.
bool IsMultiplicationSupported(Compute compute,
                               const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsMultiplicationSupported, input0, input1, output);
}
230
/// Queries whether a Normalization layer with the given tensors and descriptor is supported
/// on the requested compute backend. On failure, a truncated reason is written into
/// reasonIfUnsupported.
bool IsNormalizationSupported(Compute compute,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const NormalizationDescriptor& descriptor,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsNormalizationSupported, input, output, descriptor);
}
240
/// Queries whether an Output layer with the given tensor is supported on the requested
/// compute backend. On failure, a truncated reason is written into reasonIfUnsupported.
bool IsOutputSupported(Compute compute,
                       const TensorInfo& output,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsOutputSupported, output);
}
248
/// Queries whether a Permute layer with the given tensors and descriptor is supported on
/// the requested compute backend. On failure, a truncated reason is written into
/// reasonIfUnsupported.
bool IsPermuteSupported(Compute compute,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const PermuteDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsPermuteSupported, input, output, descriptor);
}
258
/// Queries whether a Pooling2d layer with the given tensors and descriptor is supported on
/// the requested compute backend. On failure, a truncated reason is written into
/// reasonIfUnsupported.
bool IsPooling2dSupported(Compute compute,
                          const TensorInfo& input,
                          const TensorInfo& output,
                          const Pooling2dDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsPooling2dSupported, input, output, descriptor);
}
268
/// Queries whether a ResizeBilinear layer with the given input tensor is supported on the
/// requested compute backend. On failure, a truncated reason is written into reasonIfUnsupported.
bool IsResizeBilinearSupported(Compute compute,
                               const TensorInfo& input,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsResizeBilinearSupported, input);
}
276
/// Queries whether a Softmax layer with the given tensors and descriptor is supported on
/// the requested compute backend. On failure, a truncated reason is written into
/// reasonIfUnsupported.
bool IsSoftmaxSupported(Compute compute,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const SoftmaxDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsSoftmaxSupported, input, output, descriptor);
}
286
/// Queries whether a Splitter layer with the given input tensor and views descriptor is
/// supported on the requested compute backend. On failure, a truncated reason is written
/// into reasonIfUnsupported.
bool IsSplitterSupported(Compute compute,
                         const TensorInfo& input,
                         const ViewsDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsSplitterSupported, input, descriptor);
}
295
/// Queries whether a FakeQuantization layer with the given input tensor and descriptor is
/// supported on the requested compute backend. On failure, a truncated reason is written
/// into reasonIfUnsupported.
bool IsFakeQuantizationSupported(Compute compute,
                                 const TensorInfo& input,
                                 const FakeQuantizationDescriptor& descriptor,
                                 char* reasonIfUnsupported,
                                 size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsFakeQuantizationSupported, input, descriptor);
}
304
/// Queries whether a Reshape layer with the given input tensor is supported on the
/// requested compute backend. On failure, a truncated reason is written into reasonIfUnsupported.
bool IsReshapeSupported(Compute compute,
                        const TensorInfo& input,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsReshapeSupported, input);
}
312
313 bool IsFloorSupported(Compute compute,
314                       const TensorInfo& input,
315                       const TensorInfo& output,
316                       char* reasonIfUnsupported,
317                       size_t reasonIfUnsupportedMaxLength)
318 {
319     // By definition (that is, regardless of compute device), shapes and data type must match.
320     if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
321     {
322         return false;
323     }
324
325     FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output);
326 }
327
328 }