// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
5 #include "armnn/LayerSupport.hpp"
7 #include "backends/RefLayerSupport.hpp"
8 #include "backends/NeonLayerSupport.hpp"
9 #include "backends/ClLayerSupport.hpp"
11 #include <boost/assert.hpp>
/// Helper function to copy a full string to a truncated version.
/// Copies at most maxLength characters of fullString into truncatedString and
/// always null-terminates the result. A nullptr destination is a silent no-op,
/// so callers that don't want an error message can pass nullptr.
/// NOTE(review): the terminator is written at index copyLength, which can equal
/// maxLength — the destination buffer must hold at least maxLength + 1 bytes; confirm callers.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if(truncatedString != nullptr)
    {
        // Copy no more than the caller-supplied limit, even if the full message is longer.
        size_t copyLength = std::min(maxLength, strlen(fullString));
        std::strncpy(truncatedString, fullString, copyLength);
        // Ensure null-terminated string.
        truncatedString[copyLength] = '\0';
    }
}
// Helper macro to avoid code duplication.
// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute.
// Expands inside each IsXxxSupported wrapper: it collects the backend's failure reason
// into a local std::string, copies it into the caller's (possibly null) buffer via
// CopyErrorMessage, and returns the backend's verdict. Unknown compute values fall
// back to the reference backend.
#define FORWARD_LAYER_SUPPORT_FUNC(compute, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported; \
    switch(compute) \
    { \
        case Compute::CpuRef: \
            isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
        case Compute::CpuAcc: \
            isSupported = func##Neon(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
        case Compute::GpuAcc: \
            isSupported = func##Cl(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
        default: \
            isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
    } \
    CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
    return isSupported;
54 bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
56 return input0.GetDataType() == input1.GetDataType();
59 bool IsActivationSupported(Compute compute,
60 const TensorInfo& input,
61 const TensorInfo& output,
62 const ActivationDescriptor& descriptor,
63 char* reasonIfUnsupported,
64 size_t reasonIfUnsupportedMaxLength)
66 FORWARD_LAYER_SUPPORT_FUNC(compute, IsActivationSupported, input, output, descriptor);
69 bool IsAdditionSupported(Compute compute,
70 const TensorInfo& input0,
71 const TensorInfo& input1,
72 const TensorInfo& output,
73 char* reasonIfUnsupported,
74 size_t reasonIfUnsupportedMaxLength)
76 if(!CheckTensorDataTypesEqual(input0, input1))
81 FORWARD_LAYER_SUPPORT_FUNC(compute, IsAdditionSupported, input0, input1, output);
84 bool IsBatchNormalizationSupported(Compute compute,
85 const TensorInfo& input,
86 const TensorInfo& output,
87 const TensorInfo& mean,
88 const TensorInfo& var,
89 const TensorInfo& beta,
90 const TensorInfo& gamma,
91 const BatchNormalizationDescriptor& descriptor,
92 char* reasonIfUnsupported,
93 size_t reasonIfUnsupportedMaxLength)
95 FORWARD_LAYER_SUPPORT_FUNC(compute,
96 IsBatchNormalizationSupported,
106 bool IsConstantSupported(Compute compute,
107 const TensorInfo& output,
108 char* reasonIfUnsupported,
109 size_t reasonIfUnsupportedMaxLength)
111 FORWARD_LAYER_SUPPORT_FUNC(compute, IsConstantSupported, output);
114 bool IsConvertFp16ToFp32Supported(Compute compute,
115 const TensorInfo& input,
116 const TensorInfo& output,
117 char* reasonIfUnsupported,
118 size_t reasonIfUnsupportedMaxLength)
120 FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp16ToFp32Supported, input, output);
123 bool IsConvertFp32ToFp16Supported(Compute compute,
124 const TensorInfo& input,
125 const TensorInfo& output,
126 char* reasonIfUnsupported,
127 size_t reasonIfUnsupportedMaxLength)
129 FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp32ToFp16Supported, input, output);
132 bool IsConvolution2dSupported(Compute compute,
133 const TensorInfo& input,
134 const TensorInfo& output,
135 const Convolution2dDescriptor& descriptor,
136 const TensorInfo& weights,
137 const TensorInfo& biases,
138 char* reasonIfUnsupported,
139 size_t reasonIfUnsupportedMaxLength)
141 FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvolution2dSupported, input, output, descriptor, weights, biases);
144 bool IsDepthwiseConvolutionSupported(Compute compute,
145 const TensorInfo& input,
146 const TensorInfo& output,
147 const DepthwiseConvolution2dDescriptor& descriptor,
148 const TensorInfo& weights,
149 const TensorInfo& biases,
150 char* reasonIfUnsupported,
151 size_t reasonIfUnsupportedMaxLength)
153 FORWARD_LAYER_SUPPORT_FUNC(compute, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
156 bool IsInputSupported(Compute compute,
157 const TensorInfo& input,
158 char* reasonIfUnsupported,
159 size_t reasonIfUnsupportedMaxLength)
161 FORWARD_LAYER_SUPPORT_FUNC(compute, IsInputSupported, input);
164 bool IsFullyConnectedSupported(Compute compute,
165 const TensorInfo& input,
166 const TensorInfo& output,
167 const TensorInfo& weights,
168 const TensorInfo& biases,
169 const FullyConnectedDescriptor& descriptor,
170 char* reasonIfUnsupported,
171 size_t reasonIfUnsupportedMaxLength)
173 FORWARD_LAYER_SUPPORT_FUNC(compute, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
176 bool IsL2NormalizationSupported(Compute compute,
177 const TensorInfo& input,
178 const TensorInfo& output,
179 char* reasonIfUnsupported,
180 size_t reasonIfUnsupportedMaxLength)
182 FORWARD_LAYER_SUPPORT_FUNC(compute, IsL2NormalizationSupported, input, output);
185 bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo& outputStateIn,
186 const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
187 const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
188 const TensorInfo& output, const LstmDescriptor& descriptor,
189 const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
190 const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
191 const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
192 const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
193 const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
194 const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
195 const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
196 const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
197 const TensorInfo* cellToOutputWeights, char* reasonIfUnsupported,
198 size_t reasonIfUnsupportedMaxLength)
201 FORWARD_LAYER_SUPPORT_FUNC(compute, IsLstmSupported, input, outputStateIn, cellStateIn,
202 scratchBuffer, outputStateOut, cellStateOut,
203 output, descriptor, inputToForgetWeights, inputToCellWeights,
204 inputToOutputWeights, recurrentToForgetWeights,
205 recurrentToCellWeights, recurrentToOutputWeights,
206 forgetGateBias, cellBias, outputGateBias,
207 inputToInputWeights, recurrentToInputWeights,
208 cellToInputWeights, inputGateBias, projectionWeights,
209 projectionBias, cellToForgetWeights, cellToOutputWeights);
211 bool IsMergerSupported(Compute compute,
212 std::vector<const TensorInfo*> inputs,
213 const OriginsDescriptor& descriptor,
214 char* reasonIfUnsupported,
215 size_t reasonIfUnsupportedMaxLength)
217 BOOST_ASSERT(inputs.size() > 0);
218 FORWARD_LAYER_SUPPORT_FUNC(compute, IsMergerSupported, inputs, descriptor);
221 bool IsMultiplicationSupported(Compute compute,
222 const TensorInfo& input0,
223 const TensorInfo& input1,
224 const TensorInfo& output,
225 char* reasonIfUnsupported,
226 size_t reasonIfUnsupportedMaxLength)
228 FORWARD_LAYER_SUPPORT_FUNC(compute, IsMultiplicationSupported, input0, input1, output);
231 bool IsNormalizationSupported(Compute compute,
232 const TensorInfo& input,
233 const TensorInfo& output,
234 const NormalizationDescriptor& descriptor,
235 char* reasonIfUnsupported,
236 size_t reasonIfUnsupportedMaxLength)
238 FORWARD_LAYER_SUPPORT_FUNC(compute, IsNormalizationSupported, input, output, descriptor);
241 bool IsOutputSupported(Compute compute,
242 const TensorInfo& output,
243 char* reasonIfUnsupported,
244 size_t reasonIfUnsupportedMaxLength)
246 FORWARD_LAYER_SUPPORT_FUNC(compute, IsOutputSupported, output);
249 bool IsPermuteSupported(Compute compute,
250 const TensorInfo& input,
251 const TensorInfo& output,
252 const PermuteDescriptor& descriptor,
253 char* reasonIfUnsupported,
254 size_t reasonIfUnsupportedMaxLength)
256 FORWARD_LAYER_SUPPORT_FUNC(compute, IsPermuteSupported, input, output, descriptor);
259 bool IsPooling2dSupported(Compute compute,
260 const TensorInfo& input,
261 const TensorInfo& output,
262 const Pooling2dDescriptor& descriptor,
263 char* reasonIfUnsupported,
264 size_t reasonIfUnsupportedMaxLength)
266 FORWARD_LAYER_SUPPORT_FUNC(compute, IsPooling2dSupported, input, output, descriptor);
269 bool IsResizeBilinearSupported(Compute compute,
270 const TensorInfo& input,
271 char* reasonIfUnsupported,
272 size_t reasonIfUnsupportedMaxLength)
274 FORWARD_LAYER_SUPPORT_FUNC(compute, IsResizeBilinearSupported, input);
277 bool IsSoftmaxSupported(Compute compute,
278 const TensorInfo& input,
279 const TensorInfo& output,
280 const SoftmaxDescriptor& descriptor,
281 char* reasonIfUnsupported,
282 size_t reasonIfUnsupportedMaxLength)
284 FORWARD_LAYER_SUPPORT_FUNC(compute, IsSoftmaxSupported, input, output, descriptor);
287 bool IsSplitterSupported(Compute compute,
288 const TensorInfo& input,
289 const ViewsDescriptor& descriptor,
290 char* reasonIfUnsupported,
291 size_t reasonIfUnsupportedMaxLength)
293 FORWARD_LAYER_SUPPORT_FUNC(compute, IsSplitterSupported, input, descriptor);
296 bool IsFakeQuantizationSupported(Compute compute,
297 const TensorInfo& input,
298 const FakeQuantizationDescriptor& descriptor,
299 char* reasonIfUnsupported,
300 size_t reasonIfUnsupportedMaxLength)
302 FORWARD_LAYER_SUPPORT_FUNC(compute, IsFakeQuantizationSupported, input, descriptor);
305 bool IsReshapeSupported(Compute compute,
306 const TensorInfo& input,
307 char* reasonIfUnsupported,
308 size_t reasonIfUnsupportedMaxLength)
310 FORWARD_LAYER_SUPPORT_FUNC(compute, IsReshapeSupported, input);
313 bool IsFloorSupported(Compute compute,
314 const TensorInfo& input,
315 const TensorInfo& output,
316 char* reasonIfUnsupported,
317 size_t reasonIfUnsupportedMaxLength)
319 // By definition (that is, regardless of compute device), shapes and data type must match.
320 if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
325 FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output);