2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
5 #include <armnn/LayerSupport.hpp>
6 #include <armnn/Optional.hpp>
7 #include <armnn/ILayerSupport.hpp>
9 #include <backendsCommon/BackendRegistry.hpp>
10 #include <backendsCommon/IBackendInternal.hpp>
12 #include <boost/assert.hpp>
16 #include <unordered_map>
17 #include <armnn/ArmNN.hpp>
/// Helper function to copy a full string to a truncated version.
/// Copies at most maxLength characters of fullString into truncatedString and always
/// null-terminates the result, so the destination buffer must hold at least
/// maxLength + 1 bytes. A null truncatedString is a no-op; a null fullString is
/// treated as an empty message (the original dereferenced it unconditionally — UB).
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if (truncatedString == nullptr)
    {
        return;
    }
    // Guard against a null source: behave as if the message were empty.
    const char* source = (fullString != nullptr) ? fullString : "";
    size_t copyLength = std::min(maxLength, strlen(source));
    std::strncpy(truncatedString, source, copyLength);
    // Ensure null-terminated string.
    truncatedString[copyLength] = '\0';
}
// Helper macro to avoid code duplication.
// Forwards function func to the ILayerSupport object of the backend identified by
// backendId. Expects the enclosing function to declare the parameters
// 'reasonIfUnsupported' and 'reasonIfUnsupportedMaxLength'; the (possibly truncated)
// failure reason is copied into 'reasonIfUnsupported' and the support result returned.
#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported; \
    try \
    { \
        auto const& backendRegistry = BackendRegistryInstance(); \
        if (!backendRegistry.IsBackendRegistered(backendId)) \
        { \
            std::stringstream ss; \
            ss << __func__ << " is not supported on " << backendId << " because this backend is not registered."; \
            reasonIfUnsupportedFull = ss.str(); \
            isSupported = false; \
        } \
        else \
        { \
            auto factoryFunc = backendRegistry.GetFactory(backendId); \
            auto backendObject = factoryFunc(); \
            auto layerSupportObject = backendObject->GetLayerSupport(); \
            isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
        } \
        /* Copy the reason in every case so an unregistered backend is reported too */ \
        /* (previously the copy only happened on the registered-backend path).      */ \
        CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
        return isSupported; \
    } \
    catch (const InvalidArgumentException& e) \
    { \
        /* Re-throwing with more context information. Catch by const reference */ \
        /* to avoid slicing and an unnecessary copy of the exception object.   */ \
        throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    }
// Returns true when both tensors carry the same DataType (element type only;
// shapes are not compared).
bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
{
    return input0.GetDataType() == input1.GetDataType();
}
// Queries the given backend whether an Activation layer with these tensors and
// descriptor is supported; a truncated failure reason (at most
// reasonIfUnsupportedMaxLength chars) is written to reasonIfUnsupported if non-null.
bool IsActivationSupported(const BackendId& backend,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const ActivationDescriptor& descriptor,
                           char* reasonIfUnsupported,
                           size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
}
81 bool IsAdditionSupported(const BackendId& backend,
82 const TensorInfo& input0,
83 const TensorInfo& input1,
84 const TensorInfo& output,
85 char* reasonIfUnsupported,
86 size_t reasonIfUnsupportedMaxLength)
88 if(!CheckTensorDataTypesEqual(input0, input1))
93 FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
// Queries the given backend whether a BatchNormalization layer with the given
// statistics tensors (mean/var/beta/gamma) and descriptor is supported.
bool IsBatchNormalizationSupported(const BackendId& backend,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& mean,
                                   const TensorInfo& var,
                                   const TensorInfo& beta,
                                   const TensorInfo& gamma,
                                   const BatchNormalizationDescriptor& descriptor,
                                   char* reasonIfUnsupported,
                                   size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend,
                               IsBatchNormalizationSupported,
                               input,
                               output,
                               mean,
                               var,
                               beta,
                               gamma,
                               descriptor);
}
// Queries the given backend whether a BatchToSpaceNd layer is supported.
bool IsBatchToSpaceNdSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const BatchToSpaceNdDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend,
                               IsBatchToSpaceNdSupported,
                               input,
                               output,
                               descriptor);
}
// Queries the given backend whether a Constant layer producing 'output' is supported.
bool IsConstantSupported(const BackendId& backend,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
}
// Queries the given backend whether an FP16 -> FP32 conversion layer is supported.
bool IsConvertFp16ToFp32Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
}
// Queries the given backend whether an FP32 -> FP16 conversion layer is supported.
bool IsConvertFp32ToFp16Supported(const BackendId& backend,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
}
// Queries the given backend whether a Convolution2d layer is supported.
// 'biases' is optional: an empty Optional means a bias-less convolution.
bool IsConvolution2dSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const Convolution2dDescriptor& descriptor,
                              const TensorInfo& weights,
                              const Optional<TensorInfo>& biases,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
}
// Queries the given backend whether a Division layer is supported.
// NOTE(review): unlike Addition, no CheckTensorDataTypesEqual pre-check is done here;
// type mismatches are left for the backend to reject.
bool IsDivisionSupported(const BackendId& backend,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
}
// Queries the given backend whether a Subtraction layer is supported.
bool IsSubtractionSupported(const BackendId& backend,
                            const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            char* reasonIfUnsupported,
                            size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
}
// Queries the given backend whether a DepthwiseConvolution2d layer is supported.
// 'biases' is optional: an empty Optional means a bias-less convolution.
bool IsDepthwiseConvolutionSupported(const BackendId& backend,
                                     const TensorInfo& input,
                                     const TensorInfo& output,
                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                     const TensorInfo& weights,
                                     const Optional<TensorInfo>& biases,
                                     char* reasonIfUnsupported,
                                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
}
// Queries the given backend whether an Input layer with this tensor is supported.
bool IsInputSupported(const BackendId& backend,
                      const TensorInfo& input,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
}
// Queries the given backend whether a FullyConnected layer is supported.
// NOTE(review): 'biases' is a plain TensorInfo here (not Optional) — presumably the
// descriptor's bias-enabled flag governs whether it is meaningful; confirm at callers.
bool IsFullyConnectedSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const TensorInfo& weights,
                               const TensorInfo& biases,
                               const FullyConnectedDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}
// Queries the given backend whether an L2Normalization layer is supported.
bool IsL2NormalizationSupported(const BackendId& backend,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                const L2NormalizationDescriptor& descriptor,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
}
// Queries the given backend whether an LSTM layer is supported.
// Mandatory weight/bias tensors are passed by const reference; the trailing
// TensorInfo pointers are the optional tensors (CIFG inputs, peephole weights,
// projection weights/bias) and may be null when the corresponding LSTM feature
// is disabled in the descriptor.
bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
                     const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                     const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                     const TensorInfo& output, const LstmDescriptor& descriptor,
                     const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
                     const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
                     const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
                     const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
                     const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
                     const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
                     const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
                     const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
                     const TensorInfo* cellToOutputWeights, char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
                               scratchBuffer, outputStateOut, cellStateOut,
                               output, descriptor, inputToForgetWeights, inputToCellWeights,
                               inputToOutputWeights, recurrentToForgetWeights,
                               recurrentToCellWeights, recurrentToOutputWeights,
                               forgetGateBias, cellBias, outputGateBias,
                               inputToInputWeights, recurrentToInputWeights,
                               cellToInputWeights, inputGateBias, projectionWeights,
                               projectionBias, cellToForgetWeights, cellToOutputWeights);
}
// Queries the given backend whether a Merger (concatenation) layer is supported.
// Requires at least one input; asserts in debug builds if 'inputs' is empty.
bool IsMergerSupported(const BackendId& backend,
                       std::vector<const TensorInfo*> inputs,
                       const TensorInfo& output,
                       const OriginsDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    BOOST_ASSERT(inputs.size() > 0);
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
}
// Queries the given backend whether a Multiplication layer is supported.
bool IsMultiplicationSupported(const BackendId& backend,
                               const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
}
// Queries the given backend whether a Normalization layer is supported.
bool IsNormalizationSupported(const BackendId& backend,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const NormalizationDescriptor& descriptor,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
}
// Queries the given backend whether an Output layer with this tensor is supported.
bool IsOutputSupported(const BackendId& backend,
                       const TensorInfo& output,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
}
// Queries the given backend whether a Permute layer is supported.
bool IsPermuteSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const PermuteDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
}
// Queries the given backend whether a Pooling2d layer is supported.
bool IsPooling2dSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const TensorInfo& output,
                          const Pooling2dDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
}
// Queries the given backend whether a ResizeBilinear layer is supported.
// Only the input tensor takes part in the check here (no output parameter).
bool IsResizeBilinearSupported(const BackendId& backend,
                               const TensorInfo& input,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeBilinearSupported, input);
}
// Queries the given backend whether a Softmax layer is supported.
bool IsSoftmaxSupported(const BackendId& backend,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const SoftmaxDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
}
// Queries the given backend whether a SpaceToBatchNd layer is supported.
bool IsSpaceToBatchNdSupported(const BackendId& backend,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const SpaceToBatchNdDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
}
// Queries the given backend whether a Splitter layer is supported.
// Output views are described by the ViewsDescriptor rather than TensorInfos.
bool IsSplitterSupported(const BackendId& backend,
                         const TensorInfo& input,
                         const ViewsDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
}
// Queries the given backend whether a FakeQuantization layer is supported.
bool IsFakeQuantizationSupported(const BackendId& backend,
                                 const TensorInfo& input,
                                 const FakeQuantizationDescriptor& descriptor,
                                 char* reasonIfUnsupported,
                                 size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
}
// Queries the given backend whether a Reshape layer is supported.
bool IsReshapeSupported(const BackendId& backend,
                        const TensorInfo& input,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input);
}
371 bool IsFloorSupported(const BackendId& backend,
372 const TensorInfo& input,
373 const TensorInfo& output,
374 char* reasonIfUnsupported,
375 size_t reasonIfUnsupportedMaxLength)
377 // By definition (that is, regardless of compute device), shapes and data type must match.
378 if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
383 FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
// Queries the given backend whether a Mean (reduction) layer is supported.
bool IsMeanSupported(const BackendId& backend,
                     const TensorInfo& input,
                     const TensorInfo& output,
                     const MeanDescriptor& descriptor,
                     char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
}
// Queries the given backend whether a Pad layer is supported.
bool IsPadSupported(const BackendId& backend,
                    const TensorInfo& input,
                    const TensorInfo& output,
                    const PadDescriptor& descriptor,
                    char* reasonIfUnsupported,
                    size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
}
// Queries the given backend whether a StridedSlice layer is supported.
bool IsStridedSliceSupported(const BackendId& backend,
                             const TensorInfo& input,
                             const TensorInfo& output,
                             const StridedSliceDescriptor& descriptor,
                             char* reasonIfUnsupported,
                             size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
}