/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "Convert.h"

#include "Swizzle.h"
#include "ir/DataType.h"
#include "ir/operation/ElementwiseActivation.h"

#include <memory>

namespace onert
{
namespace backend
{
namespace acl_common
{
::arm_compute::DataLayout asDataLayout(onert::ir::Layout layout)
{
  switch (layout)
  {
    case onert::ir::Layout::NHWC:
      return ::arm_compute::DataLayout::NHWC;
    case onert::ir::Layout::NCHW:
      return ::arm_compute::DataLayout::NCHW;
    default:
      return ::arm_compute::DataLayout::UNKNOWN;
  }
}
::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout,
                                         ir::Layout backend_layout, bool apply_dim_correction)
{
  // If the shape's rank is 0, the tensor is a scalar.
  // Some ACL kernels can take a scalar as a tensor, but ACL does not allocate a buffer for a
  // tensor of rank 0, so a scalar is treated as a rank-1 tensor with a single element.
  const auto tensor_shape = shape.rank() == 0 ? ir::Shape{1} : shape;
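
  // Illustrative sketch (not exercised in this file): a rank-0 IR shape converts to a rank-1
  // ACL shape holding one element.
  //
  //   auto acl_shape = asTensorShape(ir::Shape{}, ir::Layout::NHWC, ir::Layout::NHWC, true);
  //   assert(acl_shape.num_dimensions() == 1 && acl_shape[0] == 1);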
  const uint32_t rank = tensor_shape.rank();

  ::arm_compute::TensorShape res{};

  res.set_num_dimensions(rank);

  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    // NOTE In some cases, keeping "incorrect" dimensions is required.
    // For example, input_size is 1 in LSTM. The input-to-input weights ([num_units, input_size])
    // of LSTM are reused as the weights of a FullyConnected layer, whose weights must be at
    // least 2-dimensional. If dimension correction were applied to input_to_input_weights with
    // input_size equal to 1, the shape would collapse to 1-D and could no longer serve as
    // FullyConnected weights, so dimension correction must be disabled in that case.
    res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(),
            tensor_shape.dim(axis), apply_dim_correction);
  }

  return res;
}
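
// Sketch of the trade-off described in the NOTE above: ACL's dimension correction may drop
// trailing dimensions of size 1, so callers such as LSTM pass apply_dim_correction == false
// to keep a [num_units, 1] weight 2-dimensional for FullyConnected.
//
//   ir::Shape weights{8, 1}; // hypothetical [num_units, input_size] with input_size == 1
//   auto s = asTensorShape(weights, ir::Layout::NHWC, ir::Layout::NHWC, false);
//   // s keeps rank 2; with apply_dim_correction == true it could collapse to rank 1.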
::arm_compute::Coordinates asTensorCoordinate(const ir::Coordinates &coord,
                                              ir::Layout frontend_layout,
                                              ir::Layout backend_layout)
{
  const uint32_t rank = coord.size();

  ::arm_compute::Coordinates res{};

  res.set_num_dimensions(rank);

  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(), coord[axis]);
  }

  return res;
}
::arm_compute::DataType asDataType(const ir::DataType type)
{
  switch (type)
  {
    case ir::DataType::FLOAT32:
      return ::arm_compute::DataType::F32;
    case ir::DataType::INT32:
      return ::arm_compute::DataType::S32;
    case ir::DataType::UINT32:
      return ::arm_compute::DataType::U32;
    case ir::DataType::QUANT_UINT8_ASYMM:
      return ::arm_compute::DataType::QASYMM8;
    case ir::DataType::BOOL8:
    case ir::DataType::UINT8:
      return ::arm_compute::DataType::U8;
    case ir::DataType::QUANT_INT8_SYMM:
      return ::arm_compute::DataType::S8;
    case ir::DataType::FLOAT16:
      return ::arm_compute::DataType::F16;
    case ir::DataType::INT64:
      return ::arm_compute::DataType::S64;
    default:
      throw std::runtime_error("Not supported, yet");
  }
}
::arm_compute::QuantizationInfo asQuantizationInfo(const float scale, const int32_t offset)
{
  return ::arm_compute::QuantizationInfo(scale, offset);
}
::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
                                       ir::Layout frontend_layout, ir::Layout backend_layout,
                                       bool apply_dim_correction)
{
  ::arm_compute::TensorInfo info(
    asTensorShape(shape, frontend_layout, backend_layout, apply_dim_correction), 1,
    asDataType(typeInfo.type()), asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
  info.set_data_layout(asDataLayout(backend_layout));
  return info;
}
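
// Example usage (a sketch; assumes ir::TypeInfo is constructible from a DataType): building
// ACL tensor info for a float tensor the frontend sees as NHWC but the backend stores as NCHW.
// asTensorShape permutes the axes and set_data_layout records the backend layout.
//
//   ir::TypeInfo type_info{ir::DataType::FLOAT32};
//   auto info = asTensorInfo(ir::Shape{1, 224, 224, 3}, type_info,
//                            ir::Layout::NHWC, ir::Layout::NCHW, true);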
::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
                                             const ir::Stride &stride)
{
  return ::arm_compute::PadStrideInfo{stride.horizontal,
                                      stride.vertical,
                                      padding.left,
                                      padding.right,
                                      padding.top,
                                      padding.bottom,
                                      ::arm_compute::DimensionRoundingType::FLOOR};
}
::arm_compute::ActivationLayerInfo asActivationLayerInfo(const ir::Activation act_code)
{
  switch (act_code)
  {
    case ir::Activation::NONE:
      return ::arm_compute::ActivationLayerInfo{};
    case ir::Activation::RELU:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
    case ir::Activation::RELU1:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
    case ir::Activation::RELU6:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
    // Cases for the activations of LSTM.
    case ir::Activation::TANH:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
    case ir::Activation::SIGMOID:
      // NOTE The sigmoid function is a special case of the logistic function, with L = 1,
      //      k = 1, and x0 = 0.
      // TODO In both ACL and the NN API spec, Logistic currently always uses L = 1, k = 1,
      //      and x0 = 0 (i.e. it is always sigmoid) regardless of the parameter values.
      //      If ACL ever supports a non-sigmoid logistic, fix the parameter values here.
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.0f, 0.0f};
    default:
      throw std::runtime_error{"Not supported, yet"};
  }
}
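
// Illustrative note on the bounded-ReLU mapping above: ACL's LU_BOUNDED_RELU clamps to
// [beta, alpha], so RELU6 becomes LU_BOUNDED_RELU with alpha = 6 (upper bound) and beta = 0
// (lower bound), and RELU1 clamps to [-1, 1].
//
//   auto relu6 = asActivationLayerInfo(ir::Activation::RELU6);
//   // relu6.activation() == LU_BOUNDED_RELU, relu6.a() == 6.0f, relu6.b() == 0.0f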
::arm_compute::ActivationLayerInfo
asActivationLayerInfo(const ir::operation::ElementwiseActivation::Type op_type, float alpha,
                      float beta)
{
  switch (op_type)
  {
    case ir::operation::ElementwiseActivation::Type::RELU:
      if (beta == 0.f)
      {
        if (alpha == ir::operation::ElementwiseActivation::infinity)
        {
          return ::arm_compute::ActivationLayerInfo{
            ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
        }
        else
        {
          return ::arm_compute::ActivationLayerInfo{
            ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, alpha};
        }
      }
      else
      {
        return ::arm_compute::ActivationLayerInfo{
          ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, alpha, beta};
      }
    case ir::operation::ElementwiseActivation::Type::TANH:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, alpha, beta};
    case ir::operation::ElementwiseActivation::Type::LOGISTIC:
      // NOTE The sigmoid function is a special case of the logistic function, with L = 1,
      //      k = 1, and x0 = 0.
      // TODO In both ACL and the NN API spec, Logistic currently always uses L = 1, k = 1,
      //      and x0 = 0 (i.e. it is always sigmoid) regardless of the parameter values.
      //      If ACL ever supports a non-sigmoid logistic, fix the parameter values here.
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC};
    case ir::operation::ElementwiseActivation::Type::LEAKY_RELU:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LEAKY_RELU, alpha};
    default:
      throw std::runtime_error{"Not supported, yet"};
  }
}
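
// Sketch of how (alpha, beta) select the ReLU variant in the RELU case above:
//   beta == 0 && alpha == infinity -> plain RELU       (max(0, x))
//   beta == 0 && alpha finite      -> BOUNDED_RELU     (clamp to [0, alpha])
//   beta != 0                      -> LU_BOUNDED_RELU  (clamp to [beta, alpha])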
arm_compute::Coordinates asCoordinates(const ir::Operand &operand, int32_t rank,
                                       ir::Layout frontend_layout, ir::Layout backend_layout)
{
  std::set<uint32_t> axes = asSet(operand, rank, frontend_layout, backend_layout);

  arm_compute::Coordinates reduce_axes;
  for (const int32_t axis : axes)
  {
    reduce_axes.set(reduce_axes.num_dimensions(), axis);
  }

  return reduce_axes;
}
std::set<uint32_t> asSet(const ir::Operand &operand, int32_t rank, ir::Layout frontend_layout,
                         ir::Layout backend_layout)
{
  std::set<std::uint32_t> axes;

  for (size_t i = 0; i < operand.shape().num_elements(); ++i)
  {
    int32_t axis = 0;
    switch (operand.typeInfo().type())
    {
      case ir::DataType::INT32:
        axis = reinterpret_cast<const int32_t *>(operand.data()->base())[i];
        break;
      case ir::DataType::INT64:
        axis = reinterpret_cast<const int64_t *>(operand.data()->base())[i];
        break;
      default:
        throw std::runtime_error("acl_common::asSet: Not supported data type");
    }
    // Normalize negative axes to their non-negative equivalents.
    if (axis < 0)
      axis += rank;
    axes.insert(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value());
  }

  return axes;
}
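
// Illustrative sketch of the axis normalization above: for a rank-4 operand, an axis value of
// -1 refers to the last dimension and is normalized to 3 before the layout conversion.
//
//   // axis == -1, rank == 4  ->  axis += rank  ->  axis == 3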
std::unique_ptr<AclFunction> asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer)
{
  return std::make_unique<AclFunction>(std::move(layer));
}
ir::Layout asRuntimeLayout(::arm_compute::DataLayout data_layout)
{
  switch (data_layout)
  {
    case ::arm_compute::DataLayout::NHWC:
      return ir::Layout::NHWC;
    case ::arm_compute::DataLayout::NCHW:
      return ir::Layout::NCHW;
    default:
      return ir::Layout::UNKNOWN;
  }
}
ir::DataType asRuntimeDataType(::arm_compute::DataType data_type)
{
  switch (data_type)
  {
    case ::arm_compute::DataType::F32:
      return ir::DataType::FLOAT32;
    case ::arm_compute::DataType::S32:
      return ir::DataType::INT32;
    case ::arm_compute::DataType::U32:
      return ir::DataType::UINT32;
    case ::arm_compute::DataType::QASYMM8:
      return ir::DataType::QUANT_UINT8_ASYMM;
    case ::arm_compute::DataType::U8:
      return ir::DataType::UINT8;
    case ::arm_compute::DataType::QSYMM8:
      return ir::DataType::QUANT_INT8_SYMM;
    case ::arm_compute::DataType::F16:
      return ir::DataType::FLOAT16;
    case ::arm_compute::DataType::S64:
      return ir::DataType::INT64;
    default:
      throw std::runtime_error{"Not supported, yet"};
  }
}
arm_compute::PoolingType convertPoolType(ir::operation::Pool2D::PoolType pool_type_ir)
{
  switch (pool_type_ir)
  {
    case ir::operation::Pool2D::PoolType::AVG:
      return arm_compute::PoolingType::AVG;
    case ir::operation::Pool2D::PoolType::L2:
      return arm_compute::PoolingType::L2;
    case ir::operation::Pool2D::PoolType::MAX:
      return arm_compute::PoolingType::MAX;
    default:
      throw std::runtime_error("convertPoolType: Not supported operation yet");
  }
}
arm_compute::ReductionOperation convertReduceType(ir::operation::Reduce::ReduceType reduce_type_ir)
{
  switch (reduce_type_ir)
  {
    case ir::operation::Reduce::ReduceType::MAX:
      return arm_compute::ReductionOperation::MAX;
    case ir::operation::Reduce::ReduceType::MIN:
      return arm_compute::ReductionOperation::MIN;
    case ir::operation::Reduce::ReduceType::SUM:
      return arm_compute::ReductionOperation::SUM;
    default:
      throw std::runtime_error("convertReduceType: Not supported operation yet");
  }
}
arm_compute::PixelValue asPixelValue(const ir::Operand &operand)
{
  assert(operand.isConstant());
  assert(operand.shape().num_elements() == 1);
  switch (operand.typeInfo().type())
  {
    case ir::DataType::INT32:
      return arm_compute::PixelValue(operand.asScalar<int32_t>());
    case ir::DataType::INT64:
      return arm_compute::PixelValue(operand.asScalar<int64_t>());
    case ir::DataType::UINT32:
      return arm_compute::PixelValue(operand.asScalar<uint32_t>());
    case ir::DataType::UINT8:
      return arm_compute::PixelValue(operand.asScalar<uint8_t>());
    case ir::DataType::FLOAT32:
      return arm_compute::PixelValue(operand.asScalar<float>());
    default:
      throw std::runtime_error("asPixelValue : Not supported datatype yet");
  }
}
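
// Usage note (a sketch): asPixelValue expects a constant operand holding exactly one element,
// as the asserts above enforce; pass e.g. a scalar fill or pad value baked into the model, not
// a runtime tensor.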
arm_compute::Size2D asDilation(uint32_t dilation_width, uint32_t dilation_height)
{
  assert(dilation_width != 0);
  assert(dilation_height != 0);

  return arm_compute::Size2D(dilation_width, dilation_height);
}
} // namespace acl_common
} // namespace backend
} // namespace onert