/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "Convert.h"

#include "Swizzle.h"
#include "ir/DataType.h"
#include "ir/operation/ElementwiseActivation.h"

#include <cassert>
#include <memory>
#include <set>
#include <stdexcept>

namespace onert
{
namespace backend
{
namespace acl_common
{
::arm_compute::DataLayout asDataLayout(onert::ir::Layout layout)
{
  switch (layout)
  {
    case onert::ir::Layout::NHWC:
      return ::arm_compute::DataLayout::NHWC;
    case onert::ir::Layout::NCHW:
      return ::arm_compute::DataLayout::NCHW;
    default:
      return ::arm_compute::DataLayout::UNKNOWN;
  }
}
::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout,
                                         ir::Layout backend_layout, bool apply_dim_correction)
{
  // If the shape's rank is 0, the tensor is a scalar.
  // Some ACL kernels can use a scalar as a tensor, but ACL does not allocate a buffer for a
  // tensor of rank 0, so promote it to a rank-1 shape with a single element.
  const auto tensor_shape = shape.rank() == 0 ? ir::Shape{1} : shape;

  const uint32_t rank = tensor_shape.rank();

  ::arm_compute::TensorShape res{};

  res.set_num_dimensions(rank);

  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    // NOTE In some cases, the dimension correction must be skipped.
    // For example, input_size may be 1 in an LSTM. The LSTM's input-to-input weights
    // ([num_units, input_size]) are reused as the weights of a FullyConnected, and a
    // FullyConnected's weights must be at least 2-dimensional.
    // If the dimension correction were applied to input_to_input_weights with input_size
    // equal to 1, the shape would collapse to 1-D, and it could no longer serve as the
    // FullyConnected weights.
    res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(),
            tensor_shape.dim(axis), apply_dim_correction);
  }

  return res;
}
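// A minimal usage sketch (illustrative values only). A rank-0 scalar is promoted to a
// one-element rank-1 shape, and each frontend axis is remapped via ToARMComputeAxis into
// ACL's reversed axis convention (index 0 is the innermost dimension):
//
//   ir::Shape scalar{}; // rank 0 in the IR
//   auto s0 = asTensorShape(scalar, ir::Layout::NHWC, ir::Layout::NHWC, true);
//   // s0 describes a single-element, rank-1 shape rather than a rank-0 one.
//
//   ir::Shape nhwc{1, 8, 8, 3}; // N, H, W, C
//   auto s1 = asTensorShape(nhwc, ir::Layout::NHWC, ir::Layout::NCHW, true);
//   // s1 holds the same extents, permuted into NCHW order and stored in ACL's
//   // reversed axis order.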
::arm_compute::Coordinates asTensorCoordinate(const ir::Coordinates &coord,
                                              ir::Layout frontend_layout,
                                              ir::Layout backend_layout)
{
  const uint32_t rank = coord.size();

  ::arm_compute::Coordinates res{};

  res.set_num_dimensions(rank);

  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(), coord[axis]);
  }

  return res;
}
::arm_compute::DataType asDataType(const ir::DataType type)
{
  switch (type)
  {
    case ir::DataType::FLOAT32:
      return ::arm_compute::DataType::F32;
    case ir::DataType::INT32:
      return ::arm_compute::DataType::S32;
    case ir::DataType::UINT32:
      return ::arm_compute::DataType::U32;
    case ir::DataType::QUANT_UINT8_ASYMM:
      return ::arm_compute::DataType::QASYMM8;
    case ir::DataType::BOOL8:
    case ir::DataType::UINT8:
      return ::arm_compute::DataType::U8;
    case ir::DataType::QUANT_INT8_SYMM:
      return ::arm_compute::DataType::QSYMM8;
    case ir::DataType::QUANT_INT8_ASYMM:
      return ::arm_compute::DataType::QASYMM8_SIGNED;
    case ir::DataType::FLOAT16:
      return ::arm_compute::DataType::F16;
    case ir::DataType::INT64:
      return ::arm_compute::DataType::S64;
    case ir::DataType::QUANT_INT16_ASYMM:
      return ::arm_compute::DataType::QASYMM16;
    case ir::DataType::QUANT_INT8_SYMM_PER_CHANNEL:
      return ::arm_compute::DataType::QSYMM8_PER_CHANNEL;
    default:
      throw std::runtime_error("Not supported internal data type, yet");
  }
}
::arm_compute::QuantizationInfo asQuantizationInfo(const float scale, const int32_t offset)
{
  return ::arm_compute::QuantizationInfo(scale, offset);
}
::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
                                       ir::Layout frontend_layout, ir::Layout backend_layout,
                                       bool apply_dim_correction)
{
  ::arm_compute::TensorInfo info(
    asTensorShape(shape, frontend_layout, backend_layout, apply_dim_correction), 1,
    asDataType(typeInfo.type()), asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
  info.set_data_layout(asDataLayout(backend_layout));
  return info;
}
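// A usage sketch (illustrative values; assumes ir::TypeInfo can be built from a data type
// plus a scale/zero-point pair, as in the onert IR): building the ACL descriptor for an
// asymmetric quantized uint8 tensor.
//
//   ir::Shape shape{1, 224, 224, 3};
//   ir::TypeInfo type_info{ir::DataType::QUANT_UINT8_ASYMM, /*scale=*/0.0078125f,
//                          /*zero_point=*/128};
//   auto info = asTensorInfo(shape, type_info, ir::Layout::NHWC, ir::Layout::NHWC, true);
//   // info now carries the QASYMM8 data type, the NHWC data layout, and the given
//   // quantization parameters.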
::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
                                             const ir::Stride &stride)
{
  return ::arm_compute::PadStrideInfo{stride.horizontal,
                                      stride.vertical,
                                      padding.left,
                                      padding.right,
                                      padding.top,
                                      padding.bottom,
                                      ::arm_compute::DimensionRoundingType::FLOOR};
}
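// For reference, the ACL constructor used above takes (stride_x, stride_y, pad_left,
// pad_right, pad_top, pad_bottom, rounding). A sketch with hypothetical values:
//
//   ir::Stride stride;
//   stride.horizontal = 2;
//   stride.vertical = 2;
//   ir::ExplicitPadding pad;
//   pad.left = 0; pad.right = 1; pad.top = 0; pad.bottom = 1;
//   auto psi = asPadStrideInfo(pad, stride); // uses FLOOR rounding for output sizes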
::arm_compute::ActivationLayerInfo asActivationLayerInfo(const ir::Activation act_code)
{
  switch (act_code)
  {
    case ir::Activation::NONE:
      return ::arm_compute::ActivationLayerInfo{};
    case ir::Activation::RELU:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
    case ir::Activation::RELU1:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
    case ir::Activation::RELU6:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
    // Cases for the activations of LSTM.
    case ir::Activation::TANH:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
    case ir::Activation::SIGMOID:
      // NOTE The sigmoid function is the special case of the logistic function
      //      f(x) = L / (1 + exp(-k * (x - x0))) with L = 1, k = 1, x0 = 0.
      // TODO In the ACL and NN API specs, Logistic currently always uses L = 1, k = 1, and
      //      x0 = 0 (i.e. it is always sigmoid) regardless of the parameter values.
      //      If ACL ever supports a non-sigmoid logistic, these parameter values should be
      //      fixed accordingly.
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.0f, 0.0f};
    default:
      throw std::runtime_error{"Not supported internal activation, yet"};
  }
}
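// A quick reference for the mapping above, where a and b are ACL's ActivationLayerInfo
// parameters as documented by ACL:
//   RELU1 -> LU_BOUNDED_RELU, a = 1, b = -1: f(x) = min(1, max(-1, x))
//   RELU6 -> LU_BOUNDED_RELU, a = 6, b = 0:  f(x) = min(6, max(0, x))
//   TANH  -> a * tanh(b * x) with a = b = 1, i.e. plain tanh(x)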
::arm_compute::ActivationLayerInfo
asActivationLayerInfo(const ir::operation::ElementwiseActivation::Type op_type, float alpha,
                      float beta)
{
  switch (op_type)
  {
    case ir::operation::ElementwiseActivation::Type::RELU:
      if (beta == 0.f)
      {
        if (alpha == ir::operation::ElementwiseActivation::infinity)
        {
          return ::arm_compute::ActivationLayerInfo{
            ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
        }
        else
        {
          return ::arm_compute::ActivationLayerInfo{
            ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, alpha};
        }
      }
      else
      {
        return ::arm_compute::ActivationLayerInfo{
          ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, alpha, beta};
      }
    case ir::operation::ElementwiseActivation::Type::TANH:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, alpha, beta};
    case ir::operation::ElementwiseActivation::Type::LOGISTIC:
      // NOTE The sigmoid function is the special case of the logistic function
      //      f(x) = L / (1 + exp(-k * (x - x0))) with L = 1, k = 1, x0 = 0.
      // TODO In the ACL and NN API specs, Logistic currently always uses L = 1, k = 1, and
      //      x0 = 0 (i.e. it is always sigmoid) regardless of the parameter values.
      //      If ACL ever supports a non-sigmoid logistic, these parameter values should be
      //      fixed accordingly.
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC};
    case ir::operation::ElementwiseActivation::Type::LEAKY_RELU:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LEAKY_RELU, alpha};
    default:
      throw std::runtime_error{"Not supported internal elementwise activation, yet"};
  }
}
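// The RELU case above selects among three ACL functions based on (alpha, beta):
//   alpha = infinity, beta = 0 -> RELU:            f(x) = max(0, x)
//   finite alpha,     beta = 0 -> BOUNDED_RELU:    f(x) = min(alpha, max(0, x))
//   beta != 0                  -> LU_BOUNDED_RELU: f(x) = min(alpha, max(beta, x))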
arm_compute::Coordinates asCoordinates(const ir::Operand &operand, int32_t rank,
                                       ir::Layout frontend_layout, ir::Layout backend_layout)
{
  std::set<uint32_t> axes = asSet(operand, rank, frontend_layout, backend_layout);

  arm_compute::Coordinates reduce_axes;
  for (const int32_t axis : axes)
  {
    reduce_axes.set(reduce_axes.num_dimensions(), axis);
  }

  return reduce_axes;
}
std::set<uint32_t> asSet(const ir::Operand &operand, int32_t rank, ir::Layout frontend_layout,
                         ir::Layout backend_layout)
{
  std::set<std::uint32_t> axes;

  for (size_t i = 0; i < operand.shape().num_elements(); ++i)
  {
    int32_t axis = 0;
    switch (operand.typeInfo().type())
    {
      case ir::DataType::INT32:
        axis = reinterpret_cast<const int32_t *>(operand.data()->base())[i];
        break;
      case ir::DataType::INT64:
        axis = reinterpret_cast<const int64_t *>(operand.data()->base())[i];
        break;
      default:
        throw std::runtime_error("acl_common::asSet: Not supported data type");
    }
    // Normalize negative axes to the range [0, rank).
    if (axis < 0)
      axis += rank;
    axes.insert(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value());
  }

  return axes;
}
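// A minimal sketch of the intended use (illustrative; "axes_operand" is a hypothetical
// constant INT32/INT64 operand, such as the axes input of a Reduce operation):
//
//   // With rank = 4, an axis value of -1 is normalized to 3 before being swizzled to the
//   // corresponding ACL axis, and duplicate axes collapse via the std::set.
//   auto reduce_axes = asCoordinates(axes_operand, 4, ir::Layout::NHWC, ir::Layout::NHWC);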
std::unique_ptr<AclFunction> asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer)
{
  return std::make_unique<AclFunction>(std::move(layer));
}
ir::Layout asRuntimeLayout(::arm_compute::DataLayout data_layout)
{
  switch (data_layout)
  {
    case ::arm_compute::DataLayout::NHWC:
      return ir::Layout::NHWC;
    case ::arm_compute::DataLayout::NCHW:
      return ir::Layout::NCHW;
    default:
      return ir::Layout::UNKNOWN;
  }
}
ir::DataType asRuntimeDataType(::arm_compute::DataType data_type)
{
  switch (data_type)
  {
    case ::arm_compute::DataType::F32:
      return ir::DataType::FLOAT32;
    case ::arm_compute::DataType::S32:
      return ir::DataType::INT32;
    case ::arm_compute::DataType::U32:
      return ir::DataType::UINT32;
    case ::arm_compute::DataType::QASYMM8:
      return ir::DataType::QUANT_UINT8_ASYMM;
    case ::arm_compute::DataType::QASYMM8_SIGNED:
      return ir::DataType::QUANT_INT8_ASYMM;
    case ::arm_compute::DataType::U8:
      return ir::DataType::UINT8;
    case ::arm_compute::DataType::QSYMM8:
      return ir::DataType::QUANT_INT8_SYMM;
    case ::arm_compute::DataType::F16:
      return ir::DataType::FLOAT16;
    case ::arm_compute::DataType::S64:
      return ir::DataType::INT64;
    default:
      throw std::runtime_error{"Not supported acl data type, yet"};
  }
}
arm_compute::PoolingType convertPoolType(ir::operation::Pool2D::PoolType pool_type_ir)
{
  switch (pool_type_ir)
  {
    case ir::operation::Pool2D::PoolType::AVG:
      return arm_compute::PoolingType::AVG;
    case ir::operation::Pool2D::PoolType::L2:
      return arm_compute::PoolingType::L2;
    case ir::operation::Pool2D::PoolType::MAX:
      return arm_compute::PoolingType::MAX;
    default:
      throw std::runtime_error("convertPoolType: Not supported operation yet");
  }
}
arm_compute::ReductionOperation convertReduceType(ir::operation::Reduce::ReduceType reduce_type_ir)
{
  switch (reduce_type_ir)
  {
    case ir::operation::Reduce::ReduceType::MAX:
      return arm_compute::ReductionOperation::MAX;
    case ir::operation::Reduce::ReduceType::MIN:
      return arm_compute::ReductionOperation::MIN;
    case ir::operation::Reduce::ReduceType::SUM:
      return arm_compute::ReductionOperation::SUM;
    default:
      throw std::runtime_error("convertReduceType: Not supported operation yet");
  }
}
arm_compute::PixelValue asPixelValue(const ir::Operand &operand)
{
  assert(operand.isConstant());
  assert(operand.shape().num_elements() == 1);
  switch (operand.typeInfo().type())
  {
    case ir::DataType::INT32:
      return arm_compute::PixelValue(operand.asScalar<int32_t>());
    case ir::DataType::INT64:
      return arm_compute::PixelValue(operand.asScalar<int64_t>());
    case ir::DataType::UINT32:
      return arm_compute::PixelValue(operand.asScalar<uint32_t>());
    case ir::DataType::UINT8:
      return arm_compute::PixelValue(operand.asScalar<uint8_t>());
    case ir::DataType::FLOAT32:
      return arm_compute::PixelValue(operand.asScalar<float>());
    default:
      throw std::runtime_error("asPixelValue : Not supported datatype yet");
  }
}
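// A usage sketch ("pad_value_operand" is a hypothetical operand): converting a constant
// scalar operand, e.g. the pad-value input of a Pad operation, into the fill value ACL
// expects. The asserts above are the preconditions callers must satisfy.
//
//   assert(pad_value_operand.isConstant());
//   assert(pad_value_operand.shape().num_elements() == 1);
//   arm_compute::PixelValue fill = asPixelValue(pad_value_operand);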
arm_compute::Size2D asDilation(uint32_t dilation_width, uint32_t dilation_height)
{
  assert(dilation_width != 0);
  assert(dilation_height != 0);

  return arm_compute::Size2D(dilation_width, dilation_height);
}
} // namespace acl_common
} // namespace backend
} // namespace onert