2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #ifndef LUCI_INTERPRETER_KERNELS_UTILS_H
19 #define LUCI_INTERPRETER_KERNELS_UTILS_H
#include "core/KernelParams.h"
#include "luci_interpreter/core/Tensor.h"

#include <tensorflow/lite/kernels/internal/types.h>

#include <cassert>
#include <cstdint>

namespace luci_interpreter
{
namespace kernels
{
// Checks a kernel invariant: throws std::runtime_error tagged with the source
// file, line number and the failing condition text when `cond` is false.
// Note: expands to a bare `if` statement, so use it as a full statement.
#define LUCI_INTERPRETER_CHECK(cond)                                                               \
  if (!(cond))                                                                                     \
    throw std::runtime_error(std::string(__FILE__) + ":" + std::to_string(__LINE__) + "(" +        \
                             std::string(#cond) + ") was not true.");
// Computes the per-side (symmetric) padding needed so that a convolution with
// the given stride/dilation maps `in_size` input elements to `out_size`
// outputs. Clamped at zero: a negative requirement means no padding is needed.
inline int32_t computePadding(int32_t stride, int32_t dilation_rate, int32_t in_size,
                              int32_t filter_size, int32_t out_size)
{
  // Dilation spreads the filter taps apart, enlarging its effective extent.
  const int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  const int32_t padding = ((out_size - 1) * stride + effective_filter_size - in_size) / 2;
  return padding > 0 ? padding : 0;
}
// Like computePadding, but also reports (via *offset) the 1-element remainder
// when the total required padding is odd, so callers can distribute the extra
// element to one side (TFLite puts it at the end).
inline int32_t computePaddingWithOffset(int32_t stride, int32_t dilation_rate, int32_t in_size,
                                        int32_t filter_size, int32_t out_size, int32_t *offset)
{
  int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  int32_t total_padding = ((out_size - 1) * stride + effective_filter_size - in_size);
  // Clamp first so a negative requirement yields padding 0 / offset 0.
  total_padding = total_padding > 0 ? total_padding : 0;
  *offset = total_padding % 2;
  return total_padding / 2;
}
57 inline int32_t computeOutputSize(Padding padding, int32_t image_size, int32_t filter_size,
58 int32_t stride, int32_t dilation_rate = 1)
60 const int32_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
64 return (image_size + stride - 1) / stride;
66 return (image_size + stride - effective_filter_size) / stride;
73 void calculateActivationRange(Activation activation, float *activation_min, float *activation_max);
75 void calculateActivationRangeQuantized(Activation activation, const Tensor *output,
76 int32_t *activation_min, int32_t *activation_max);
78 // Decompose a double multiplier into a Q0.31 int32 representation of its
79 // significand, and shift representation of its exponent.
81 // Handles an arbitrary positive multiplier. The 'shift' output-value is
82 // basically the 'floating-point exponent' of the multiplier:
83 // Negative for a right-shift (when the multiplier is <1), positive for a
84 // left-shift (when the multiplier is >1)
85 void quantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
87 // Decompose a double multiplier into a Q0.31 int32 representation of its
88 // significand, and shift representation of NEGATIVE its exponent ---
89 // this is intended as a RIGHT-shift.
91 // Restricted to the case where the multiplier < 1 (and non-negative).
92 void quantizeMultiplierSmallerThanOneExp(double double_multiplier, int32_t *quantized_multiplier,
95 Shape calculateShapeForBroadcast(const Shape &input1_shape, const Shape &input2_shape);
97 inline tflite::RuntimeShape getTensorShape(const Tensor *tensor)
99 if (tensor == nullptr)
100 return tflite::RuntimeShape();
102 const Shape &shape = tensor->shape();
103 tflite::RuntimeShape runtime_shape(shape.num_dims());
104 for (int i = 0; i < shape.num_dims(); ++i)
106 runtime_shape.SetDim(i, shape.dim(i));
108 return runtime_shape;
111 template <typename T> const T *getTensorData(const Tensor *tensor)
113 return tensor != nullptr ? tensor->data<T>() : nullptr;
116 template <typename T> T *getTensorData(Tensor *tensor)
118 return tensor != nullptr ? tensor->data<T>() : nullptr;
121 // A list of tensors in a format that can be used by kernels like split and
123 template <typename T, bool is_const> class VectorOfTensors
126 using ElementT = typename std::conditional<is_const, const T, T>::type;
127 using TensorT = typename std::conditional<is_const, const Tensor, Tensor>::type;
129 // Build with the tensors in 'tensor_list'.
130 explicit VectorOfTensors(const std::vector<TensorT *> &tensor_list)
132 const int num_tensors = tensor_list.size();
134 all_data_.reserve(num_tensors);
135 all_shape_.reserve(num_tensors);
136 all_shape_ptr_.reserve(num_tensors);
138 for (TensorT *tensor : tensor_list)
140 all_data_.push_back(getTensorData<T>(tensor));
141 all_shape_.push_back(getTensorShape(tensor));
144 // Taking the pointer from inside a std::vector is only OK if the vector is
145 // never modified, so we populate all_shape in the previous loop and then we
146 // are free to grab iterators here.
147 for (tflite::RuntimeShape &shape : all_shape_)
149 all_shape_ptr_.push_back(&shape);
152 // Return a pointer to the data pointers of all tensors in the list. For
154 // float* const* f = v.data();
155 // f[0][1] is the second element of the first tensor.
156 ElementT *const *data() const { return all_data_.data(); }
158 // Return a pointer the shape pointers of all tensors in the list. For
160 // const RuntimeShape* const* d = v.dims();
161 // dims[1] are the dimensions of the second tensor in the list.
162 const tflite::RuntimeShape *const *shapes() const { return all_shape_ptr_.data(); }
165 std::vector<ElementT *> all_data_;
166 std::vector<tflite::RuntimeShape> all_shape_;
167 std::vector<tflite::RuntimeShape *> all_shape_ptr_;
170 // A list of quantized tensors in a format that can be used by kernels like
171 // split and concatenation.
172 template <bool is_const> class VectorOfQuantizedTensors : public VectorOfTensors<uint8_t, is_const>
175 using typename VectorOfTensors<uint8_t, is_const>::TensorT;
177 // Build with the tensors in 'tensor_list'.
178 explicit VectorOfQuantizedTensors(const std::vector<TensorT *> &tensor_list)
179 : VectorOfTensors<uint8_t, is_const>(tensor_list)
181 for (TensorT *tensor : tensor_list)
183 zero_point_.push_back(tensor->zero_point());
184 scale_.push_back(tensor->scale());
188 const float *scale() const { return scale_.data(); }
189 const int32_t *zero_point() const { return zero_point_.data(); }
192 std::vector<int32_t> zero_point_;
193 std::vector<float> scale_;
196 } // namespace kernels
197 } // namespace luci_interpreter
199 #endif // LUCI_INTERPRETER_KERNELS_UTILS_H