2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
#include <armnn/LayerSupport.hpp>
#include <armnn/Optional.hpp>

#include <backendsCommon/LayerSupportRegistry.hpp>

#include <boost/assert.hpp>

#include <algorithm>
#include <cstring>
#include <unordered_map>
/// Helper function to copy a full string to a truncated version.
/// Copies at most maxLength characters of fullString into truncatedString and
/// always null-terminates the result.
/// NOTE(review): the terminator is written at index min(maxLength, strlen(fullString)),
/// so the destination buffer is assumed to hold maxLength + 1 bytes — TODO confirm
/// against the callers' buffer sizes.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if (truncatedString == nullptr)
    {
        // Caller did not ask for an error message; nothing to do.
        return;
    }
    const size_t charsToCopy = std::min(maxLength, strlen(fullString));
    std::strncpy(truncatedString, fullString, charsToCopy);
    // Ensure null-terminated string.
    truncatedString[charsToCopy] = '\0';
}
// Helper macro to avoid code duplication.
// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute.
// Looks up the layer-support factory registered for backendId, invokes the requested
// Is*Supported query on it (appending an Optional<std::string&> to collect the reason),
// copies any failure reason into the caller's truncated buffer, and returns the result.
// Fix: catch the exception by const reference instead of by value (avoids an
// unnecessary copy and potential slicing of derived exception types).
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported; \
    try { \
        auto factoryFunc = LayerSupportRegistryInstance().GetFactory(backendId); \
        auto layerSupportObject = factoryFunc(EmptyInitializer()); \
        isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
        CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
    } catch (const InvalidArgumentException& e) { \
        /* re-throwing with more context information */ \
        throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    } \
    return isSupported;
51 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
53 return input0.GetDataType() == input1.GetDataType();
/// Checks whether the backend identified by 'backend' supports an Activation layer
/// with the given tensors and descriptor. On failure, a reason (truncated to
/// 'reasonIfUnsupportedMaxLength') is copied into 'reasonIfUnsupported' (may be nullptr).
bool IsActivationSupported(const BackendId& backend,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const ActivationDescriptor& descriptor,
                           char* reasonIfUnsupported,
                           size_t reasonIfUnsupportedMaxLength)
{
    // Dispatches to the layer-support object registered for this backend.
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
}
/// Checks whether 'backend' supports an Addition layer for the given tensors.
/// Mixed-data-type additions are rejected up front, before consulting the backend.
bool IsAdditionSupported(const BackendId& backend,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    // Both inputs must share a data type regardless of backend capabilities.
    if(!CheckTensorDataTypesEqual(input0, input1))
    {
        return false;
    }

    FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
}
/// Checks whether 'backend' supports a BatchNormalization layer with the given
/// tensors (input/output plus the mean/var/beta/gamma statistics) and descriptor.
/// On failure a truncated reason is copied into 'reasonIfUnsupported' (may be nullptr).
bool IsBatchNormalizationSupported(const BackendId& backend,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& mean,
                                   const TensorInfo& var,
                                   const TensorInfo& beta,
                                   const TensorInfo& gamma,
                                   const BatchNormalizationDescriptor& descriptor,
                                   char* reasonIfUnsupported,
                                   size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend,
                               IsBatchNormalizationSupported,
                               input,
                               output,
                               mean,
                               var,
                               beta,
                               gamma,
                               descriptor);
}
/// Checks whether 'backend' supports a Constant layer producing 'output'.
/// On failure a truncated reason is copied into 'reasonIfUnsupported' (may be nullptr).
bool IsConstantSupported(const BackendId& backend,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
}
/// Checks whether 'backend' supports a ConvertFp16ToFp32 layer for the given tensors.
bool IsConvertFp16ToFp32Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
}
/// Checks whether 'backend' supports a ConvertFp32ToFp16 layer for the given tensors.
bool IsConvertFp32ToFp16Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
}
/// Checks whether 'backend' supports a Convolution2d layer with the given tensors,
/// descriptor, weights and optional biases.
bool IsConvolution2dSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const Convolution2dDescriptor& descriptor,
                              const TensorInfo& weights,
                              const Optional<TensorInfo>& biases,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
}
/// Checks whether 'backend' supports a Division layer for the given tensors.
bool IsDivisionSupported(const BackendId& backend,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
}
/// Checks whether 'backend' supports a Subtraction layer for the given tensors.
bool IsSubtractionSupported(const BackendId& backend,
                            const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            char* reasonIfUnsupported,
                            size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
}
/// Checks whether 'backend' supports a DepthwiseConvolution2d layer with the given
/// tensors, descriptor, weights and optional biases.
bool IsDepthwiseConvolutionSupported(const BackendId& backend,
                                     const TensorInfo& input,
                                     const TensorInfo& output,
                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                     const TensorInfo& weights,
                                     const Optional<TensorInfo>& biases,
                                     char* reasonIfUnsupported,
                                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
}
/// Checks whether 'backend' supports an Input layer with the given tensor info.
bool IsInputSupported(const BackendId& backend,
                      const TensorInfo& input,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
}
/// Checks whether 'backend' supports a FullyConnected layer with the given tensors,
/// weights, biases and descriptor.
bool IsFullyConnectedSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const TensorInfo& weights,
                               const TensorInfo& biases,
                               const FullyConnectedDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
/// Checks whether 'backend' supports an L2Normalization layer with the given
/// tensors and descriptor.
bool IsL2NormalizationSupported(const BackendId& backend,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                const L2NormalizationDescriptor& descriptor,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
}
/// Checks whether 'backend' supports an LSTM layer with the given state/scratch/output
/// tensors, descriptor, and weight/bias tensors. The pointer parameters are the
/// optional weights/biases (CIFG, peephole and projection variants); nullptr means
/// the corresponding tensor is absent. On failure a truncated reason is copied into
/// 'reasonIfUnsupported' (may be nullptr).
bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
                     const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                     const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                     const TensorInfo& output, const LstmDescriptor& descriptor,
                     const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
                     const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
                     const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
                     const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
                     const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
                     const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
                     const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
                     const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
                     const TensorInfo* cellToOutputWeights, char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
                               scratchBuffer, outputStateOut, cellStateOut,
                               output, descriptor, inputToForgetWeights, inputToCellWeights,
                               inputToOutputWeights, recurrentToForgetWeights,
                               recurrentToCellWeights, recurrentToOutputWeights,
                               forgetGateBias, cellBias, outputGateBias,
                               inputToInputWeights, recurrentToInputWeights,
                               cellToInputWeights, inputGateBias, projectionWeights,
                               projectionBias, cellToForgetWeights, cellToOutputWeights);
}
/// Checks whether 'backend' supports a Merger (concatenation) layer over the given
/// input tensors with the given origins descriptor.
/// Precondition (debug-asserted): 'inputs' must not be empty.
bool IsMergerSupported(const BackendId& backend,
                       std::vector<const TensorInfo*> inputs,
                       const OriginsDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    BOOST_ASSERT(inputs.size() > 0);
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, descriptor);
}
/// Checks whether 'backend' supports a Multiplication layer for the given tensors.
bool IsMultiplicationSupported(const BackendId& backend,
                               const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
}
/// Checks whether 'backend' supports a Normalization layer with the given tensors
/// and descriptor.
bool IsNormalizationSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const NormalizationDescriptor& descriptor,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
}
/// Checks whether 'backend' supports an Output layer with the given tensor info.
bool IsOutputSupported(const BackendId& backend,
                       const TensorInfo& output,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
}
/// Checks whether 'backend' supports a Permute layer with the given tensors
/// and descriptor.
bool IsPermuteSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const PermuteDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
}
/// Checks whether 'backend' supports a Pooling2d layer with the given tensors
/// and descriptor.
bool IsPooling2dSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const TensorInfo& output,
                          const Pooling2dDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
}
/// Checks whether 'backend' supports a ResizeBilinear layer with the given input.
bool IsResizeBilinearSupported(const BackendId& backend,
                               const TensorInfo& input,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input);
}
/// Checks whether 'backend' supports a Softmax layer with the given tensors
/// and descriptor.
bool IsSoftmaxSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const SoftmaxDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
}
/// Checks whether 'backend' supports a SpaceToBatchNd layer with the given tensors
/// and descriptor.
bool IsSpaceToBatchNdSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const SpaceToBatchNdDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
}
/// Checks whether 'backend' supports a Splitter layer over 'input' with the given
/// views descriptor.
bool IsSplitterSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const ViewsDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
}
/// Checks whether 'backend' supports a FakeQuantization layer over 'input' with the
/// given descriptor.
bool IsFakeQuantizationSupported(const BackendId& backend,
                                 const TensorInfo& input,
                                 const FakeQuantizationDescriptor& descriptor,
                                 char* reasonIfUnsupported,
                                 size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
}
/// Checks whether 'backend' supports a Reshape layer over 'input'.
bool IsReshapeSupported(const BackendId& backend,
                        const TensorInfo& input,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input);
}
/// Checks whether 'backend' supports a Floor layer. Independent of the backend,
/// Floor requires input and output to have identical shape and data type.
bool IsFloorSupported(const BackendId& backend,
                      const TensorInfo& input,
                      const TensorInfo& output,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    // By definition (that is, regardless of compute device), shapes and data type must match.
    if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
    {
        return false;
    }

    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
}
/// Checks whether 'backend' supports a Mean layer with the given tensors
/// and descriptor.
bool IsMeanSupported(const BackendId& backend,
                     const TensorInfo& input,
                     const TensorInfo& output,
                     const MeanDescriptor& descriptor,
                     char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
}
/// Checks whether 'backend' supports a Pad layer with the given tensors
/// and descriptor.
bool IsPadSupported(const BackendId& backend,
                    const TensorInfo& input,
                    const TensorInfo& output,
                    const PadDescriptor& descriptor,
                    char* reasonIfUnsupported,
                    size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
}