2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #ifndef LUCI_INTERPRETER_KERNELS_UTILS_H
19 #define LUCI_INTERPRETER_KERNELS_UTILS_H
21 #include "core/KernelParams.h"
22 #include "luci_interpreter/core/Tensor.h"
24 #include <tensorflow/lite/kernels/internal/types.h>
29 namespace luci_interpreter
// Returns the implicit zero padding needed on one side of a spatial dimension
// so that 'out_size' output elements can be produced from 'in_size' input
// elements with the given stride and dilated filter. Never negative.
inline int32_t computePadding(int32_t stride, int32_t dilation_rate, int32_t in_size,
                              int32_t filter_size, int32_t out_size)
{
  // A filter of size F with dilation D covers (F - 1) * D + 1 input elements.
  const int32_t dilated_filter_size = (filter_size - 1) * dilation_rate + 1;
  const int32_t needed = (out_size - 1) * stride + dilated_filter_size - in_size;
  const int32_t one_side = needed / 2;
  if (one_side < 0)
    return 0; // the input is already large enough; no padding required
  return one_side;
}
// Like computePadding, but also reports via '*offset' whether the total
// padding is odd (offset == 1) or even (offset == 0); the return value is the
// smaller (floor) half of the total padding.
inline int32_t computePaddingWithOffset(int32_t stride, int32_t dilation_rate, int32_t in_size,
                                        int32_t filter_size, int32_t out_size, int32_t *offset)
{
  const int32_t dilated_filter_size = (filter_size - 1) * dilation_rate + 1;
  int32_t total = (out_size - 1) * stride + dilated_filter_size - in_size;
  if (total < 0)
    total = 0; // clamp: no padding needed when the input is large enough
  *offset = total % 2;
  return total / 2;
}
// Computes the spatial output size of a convolution/pooling window for the
// given padding mode.
// NOTE(review): the selection logic between the two returns below (original
// lines 56-62, presumably a switch over 'padding' with SAME/VALID cases) is
// not visible in this chunk — confirm against the full file.
52 inline int32_t computeOutputSize(Padding padding, int32_t image_size, int32_t filter_size,
53                                  int32_t stride, int32_t dilation_rate = 1)
// A filter of size F with dilation D spans (F - 1) * D + 1 input elements.
55   const int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
// SAME padding: ceil(image_size / stride) — output covers the whole input.
59       return (image_size + stride - 1) / stride;
// VALID padding: count only positions where the dilated filter fully fits.
61       return (image_size + stride - effective_filter_size) / stride;
// Fills [*activation_min, *activation_max] with the float clamping range
// implied by 'activation' (e.g. RELU, RELU6, NONE).
68 void calculateActivationRange(Activation activation, float *activation_min, float *activation_max);
// Same as above, but expressed in the quantized domain of 'output'
// (using its scale/zero-point), as int32 clamp bounds.
70 void calculateActivationRangeQuantized(Activation activation, const Tensor *output,
71                                        int32_t *activation_min, int32_t *activation_max);
73 // Decompose a double multiplier into a Q0.31 int32 representation of its
74 // significand, and shift representation of its exponent.
76 // Handles an arbitrary positive multiplier. The 'shift' output-value is
77 // basically the 'floating-point exponent' of the multiplier:
78 // Negative for a right-shift (when the multiplier is <1), positive for a
79 // left-shift (when the multiplier is >1)
80 void quantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
82 // Decompose a double multiplier into a Q0.31 int32 representation of its
83 // significand, and shift representation of NEGATIVE its exponent ---
84 // this is intended as a RIGHT-shift.
86 // Restricted to the case where the multiplier < 1 (and non-negative).
87 void quantizeMultiplierSmallerThanOneExp(double double_multiplier, int32_t *quantized_multiplier,
// NOTE(review): the tail of this declaration (original line 88, presumably
// 'int *left_shift);') is not visible in this chunk.
// Computes the broadcast result shape of the two inputs — presumably
// numpy-style broadcasting; confirm against the implementation.
90 Shape calculateShapeForBroadcast(const Shape &input1_shape, const Shape &input2_shape);
92 inline tflite::RuntimeShape getTensorShape(const Tensor *tensor)
94 if (tensor == nullptr)
95 return tflite::RuntimeShape();
97 const Shape &shape = tensor->shape();
98 tflite::RuntimeShape runtime_shape(shape.num_dims());
99 for (int i = 0; i < shape.num_dims(); ++i)
101 runtime_shape.SetDim(i, shape.dim(i));
103 return runtime_shape;
106 template <typename T> const T *getTensorData(const Tensor *tensor)
108 return tensor != nullptr ? tensor->data<T>() : nullptr;
111 template <typename T> T *getTensorData(Tensor *tensor)
113 return tensor != nullptr ? tensor->data<T>() : nullptr;
116 // A list of tensors in a format that can be used by kernels like split and
118 template <typename T, bool is_const> class VectorOfTensors
121 using ElementT = typename std::conditional<is_const, const T, T>::type;
122 using TensorT = typename std::conditional<is_const, const Tensor, Tensor>::type;
124 // Build with the tensors in 'tensor_list'.
125 explicit VectorOfTensors(const std::vector<TensorT *> &tensor_list)
127 const int num_tensors = tensor_list.size();
129 all_data_.reserve(num_tensors);
130 all_shape_.reserve(num_tensors);
131 all_shape_ptr_.reserve(num_tensors);
133 for (TensorT *tensor : tensor_list)
135 all_data_.push_back(getTensorData<T>(tensor));
136 all_shape_.push_back(getTensorShape(tensor));
139 // Taking the pointer from inside a std::vector is only OK if the vector is
140 // never modified, so we populate all_shape in the previous loop and then we
141 // are free to grab iterators here.
142 for (tflite::RuntimeShape &shape : all_shape_)
144 all_shape_ptr_.push_back(&shape);
147 // Return a pointer to the data pointers of all tensors in the list. For
149 // float* const* f = v.data();
150 // f[0][1] is the second element of the first tensor.
151 ElementT *const *data() const { return all_data_.data(); }
153 // Return a pointer the shape pointers of all tensors in the list. For
155 // const RuntimeShape* const* d = v.dims();
156 // dims[1] are the dimensions of the second tensor in the list.
157 const tflite::RuntimeShape *const *shapes() const { return all_shape_ptr_.data(); }
160 std::vector<ElementT *> all_data_;
161 std::vector<tflite::RuntimeShape> all_shape_;
162 std::vector<tflite::RuntimeShape *> all_shape_ptr_;
165 // A list of quantized tensors in a format that can be used by kernels like
166 // split and concatenation.
167 template <bool is_const> class VectorOfQuantizedTensors : public VectorOfTensors<uint8_t, is_const>
170 using typename VectorOfTensors<uint8_t, is_const>::TensorT;
172 // Build with the tensors in 'tensor_list'.
173 explicit VectorOfQuantizedTensors(const std::vector<TensorT *> &tensor_list)
174 : VectorOfTensors<uint8_t, is_const>(tensor_list)
176 for (TensorT *tensor : tensor_list)
178 zero_point_.push_back(tensor->zero_point());
179 scale_.push_back(tensor->scale());
183 const float *scale() const { return scale_.data(); }
184 const int32_t *zero_point() const { return zero_point_.data(); }
187 std::vector<int32_t> zero_point_;
188 std::vector<float> scale_;
191 } // namespace kernels
192 } // namespace luci_interpreter
194 #endif // LUCI_INTERPRETER_KERNELS_UTILS_H