/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
#define __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__

#include <backend/IPortableTensor.h>

#include <cker/Shape.h>
#include <cker/Types.h>

#include <ir/DataType.h>
#include <ir/InternalType.h>
#include <ir/Operand.h>
#include <ir/Padding.h>

#include <cassert>
#include <cstdint>
#include <iostream>
#include <limits>
#include <stdexcept>
#include <vector>
33 using OperandType = onert::ir::DataType;
66 uint32_t getNumberOfDimensions(const IPortableTensor *tensor);
68 uint32_t getNumberOfElements(const IPortableTensor *tensor);
70 uint32_t getSizeOfDimension(const IPortableTensor *tensor, uint32_t dimensionIdx);
72 inline nnfw::cker::Shape getExtendedTensorShape(const IPortableTensor *tensor)
75 const int32_t extended_rank = 4;
76 int32_t raw_shape[extended_rank];
77 uint32_t src = extended_rank - tensor->num_dimensions();
78 for (uint32_t i = 0; i < extended_rank; ++i)
86 raw_shape[i] = tensor->dimension(i - src);
90 return nnfw::cker::Shape(extended_rank, raw_shape);
93 inline nnfw::cker::Shape getTensorShape(const IPortableTensor *tensor)
95 if (tensor == nullptr)
96 return nnfw::cker::Shape();
98 assert(tensor->layout() == ir::Layout::NHWC);
99 constexpr int kMaxSmallSize = 8;
100 int32_t raw_shape_small[kMaxSmallSize];
101 std::vector<int32_t> raw_shape_vec;
102 auto rank = tensor->num_dimensions();
103 int32_t *data = nullptr;
104 if (rank > kMaxSmallSize)
106 raw_shape_vec.resize(rank);
107 data = raw_shape_vec.data();
111 data = raw_shape_small;
114 for (uint32_t i = 0; i < rank; ++i)
116 data[i] = tensor->dimension(i);
118 return nnfw::cker::Shape(rank, data);
121 inline nnfw::cker::FusedActivationFunctionType
122 convertActivationType(const ir::Activation activation)
126 case ir::Activation::NONE:
127 return nnfw::cker::FusedActivationFunctionType::kNone;
128 case ir::Activation::RELU:
129 return nnfw::cker::FusedActivationFunctionType::kRelu;
130 case ir::Activation::RELU1:
131 return nnfw::cker::FusedActivationFunctionType::kRelu1;
132 case ir::Activation::RELU6:
133 return nnfw::cker::FusedActivationFunctionType::kRelu6;
135 throw std::runtime_error{"CPU backend: Cannot convert activation type"};
139 inline int32_t getAxis(uint32_t rank, int32_t axis, ir::Layout frontend_layout)
149 if (frontend_layout == ir::Layout::NCHW)
151 int32_t permutation[4] = {0, 3, 1, 2};
152 ret = permutation[ret];
158 void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
160 void GetQuantizedConvolutionMultiplier(const IPortableTensor *inputDescr,
161 const IPortableTensor *filterDescr,
162 const IPortableTensor *biasDescr,
163 const IPortableTensor *outputDescr, double *multiplier);
165 void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
168 template <typename T>
169 void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
171 if (activation == ir::Activation::RELU)
174 *activation_max = std::numeric_limits<T>::max();
176 else if (activation == ir::Activation::RELU6)
181 else if (activation == ir::Activation::RELU1)
183 *activation_min = -1;
186 else if (activation == ir::Activation::SIGMOID)
191 else if (activation == ir::Activation::NONE)
193 *activation_min = std::numeric_limits<T>::lowest();
194 *activation_max = std::numeric_limits<T>::max();
198 std::cout << "Unsupported fused activation function." << std::endl;
202 void CalculateActivationRangeUint8(ir::Activation activation, const IPortableTensor *output,
203 int32_t *act_min, int32_t *act_max);
205 bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2);
207 int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);
209 uint32_t sizeOfData(OperandType type, const std::vector<int32_t> &dimensions);
211 nnfw::cker::PaddingType getPaddingType(ir::PaddingType ir_padding_type);
213 std::vector<int32_t> getReducerAxes(const IPortableTensor *axes);
217 } // namespace backend
220 #endif // __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__