#define LUCI_INTERPRETER_KERNELS_UTILS_H
#include "luci_interpreter/core/Tensor.h"
-
-#include <tensorflow/lite/kernels/internal/types.h>
+#include "Builders.h"
+#include "Params.h"
#include <cassert>
#include <cstdint>
+#include <cmath>
+#include <type_traits> // std::is_same, used by one_of_types below
+
namespace luci_interpreter
{
namespace kernels
switch (padding)
{
case Padding::SAME:
+ assert(stride != 0);
return (image_size + stride - 1) / stride;
case Padding::VALID:
+ assert(stride != 0);
return (image_size + stride - effective_filter_size) / stride;
default:
assert(false);
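+// Worked example for the padding arithmetic above (illustrative): with
+// image_size = 10, stride = 2 and effective_filter_size = 3, SAME padding
+// yields (10 + 2 - 1) / 2 = 5 output elements, while VALID padding yields
+// (10 + 2 - 3) / 2 = 4.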
template <typename T>
void calculateActivationRange(Activation activation, T *activation_min, T *activation_max);
-tflite::RuntimeShape calculateShapeForBroadcast(const circle::Tensor *input1,
- const circle::Tensor *input2);
+luci_interpreter::RuntimeShape calculateShapeForBroadcast(const circle::Tensor *input1,
+ const circle::Tensor *input2);
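+
+// Illustrative only: broadcasting aligns dimensions from the right, so inputs
+// shaped [4, 1, 3] and [2, 3] broadcast to [4, 2, 3]; sizes that differ with
+// neither being 1 are expected to be rejected by the implementation.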
// Helper wrapper to hide broadcast logic
template <typename T> class BroadcastableWrapper
int _stride;
};
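+// Usage sketch (illustrative; operator[] is assumed to scale its index by
+// _stride, with _stride == 0 when a single value broadcasts over all
+// channels; the element type here is just an example):
+//   BroadcastableWrapper<ChannelQuantMultipliers> w(quant_params);
+//   auto m = w[channel]; // same call site whether quant_params holds 1 or N entries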
-inline tflite::RuntimeShape getTensorShape(const circle::Tensor *tensor)
+inline luci_interpreter::RuntimeShape getTensorShape(const circle::Tensor *tensor)
{
if (tensor == nullptr)
- return tflite::RuntimeShape();
+ return luci_interpreter::RuntimeShape();
-  tflite::RuntimeShape runtime_shape(Tensor::num_dims(tensor));
-  for (int i = 0; i < Tensor::num_dims(tensor); ++i)
+
+  auto const tensor_shape = Tensor::tensor_shape(tensor);
+
+  luci_interpreter::RuntimeShape runtime_shape(tensor_shape.size());
+  for (int i = 0; i < tensor_shape.size(); ++i)
{
- runtime_shape.SetDim(i, Tensor::dim(tensor, i));
+ runtime_shape.setDim(i, tensor_shape[i]);
}
return runtime_shape;
}
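+
+// Usage sketch (illustrative; assumes RuntimeShape keeps the flatSize()
+// helper of its TFLite counterpart):
+//   luci_interpreter::RuntimeShape shape = getTensorShape(input);
+//   const int num_elements = shape.flatSize();
+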
+inline void getTensorDims(const circle::Tensor *tensor, BaseRuntimeGraph *runtime_graph,
+ int32_t *dims)
+{
+  // 'dims' is passed by value, so assigning nullptr to it would never reach
+  // the caller; a null tensor simply leaves the output buffer untouched.
+  if (tensor == nullptr)
+    return;
+
+#ifndef DIS_DYN_SHAPES
+  // Prefer a dynamically inferred shape when the runtime graph has recorded one.
+  auto *dynamic_shape_vector = runtime_graph->getDynamicShapeTensor(tensor);
+  if (dynamic_shape_vector != nullptr)
+  {
+    for (int n = 0; n < dynamic_shape_vector->dimensionsCount(); ++n)
+    {
+      dims[n] = dynamic_shape_vector->dims(n);
+    }
+    return;
+  }
+#endif // DIS_DYN_SHAPES
+
+  // Fall back to the static shape stored in the circle model.
+  auto const tensor_shape = Tensor::tensor_shape(tensor);
+  assert(tensor_shape.size() <= kMaxSmallSize);
+  for (int i = 0; i < tensor_shape.size(); ++i)
+  {
+    dims[i] = tensor_shape[i];
+  }
+}
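+
+// Usage sketch (illustrative): callers provide a fixed-size buffer, which the
+// assert above bounds by kMaxSmallSize:
+//   int32_t input_dims[kMaxSmallSize] = {0};
+//   getTensorDims(input, runtime_graph, input_dims);
+// Since a null tensor leaves the buffer untouched, zero-initialize it or
+// check the tensor first.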
+
template <typename T> const T *getTensorData(const uint8_t *tensor_data)
{
return tensor_data != nullptr ? reinterpret_cast<const T *>(tensor_data) : nullptr;
}
-template <typename T> T *getTensorData(uint8_t *tensor_data)
+template <typename T> inline T *getTensorData(uint8_t *tensor_data)
{
return tensor_data != nullptr ? reinterpret_cast<T *>(tensor_data) : nullptr;
}
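+
+// Usage sketch (illustrative; getDataByTensor is assumed to be the runtime
+// graph's accessor for a tensor's raw buffer):
+//   const float *input_data = getTensorData<float>(runtime_graph->getDataByTensor(input));
+//   float *output_data = getTensorData<float>(runtime_graph->getDataByTensor(output));
+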
+luci_interpreter::RuntimeShape getTensorRuntimeShape(const circle::Tensor *circle_tensor,
+ BaseRuntimeGraph *runtime_graph);
+
// A list of tensors in a format that can be used by kernels like split and
// concatenation.
template <typename T, bool is_const> class VectorOfTensors
// Taking the pointer from inside a std::vector is only OK if the vector is
// never modified, so we populate all_shape in the previous loop and then we
// are free to grab iterators here.
- for (tflite::RuntimeShape &shape : all_shape_)
+ for (luci_interpreter::RuntimeShape &shape : all_shape_)
{
all_shape_ptr_.push_back(&shape);
}
-// example:
-// const RuntimeShape* const* d = v.dims();
-// dims[1] are the dimensions of the second tensor in the list.
+// example:
+//   const RuntimeShape *const *d = v.shapes();
+//   d[1] holds the dimensions of the second tensor in the list.
- const tflite::RuntimeShape *const *shapes() const { return all_shape_ptr_.data(); }
+ const luci_interpreter::RuntimeShape *const *shapes() const { return all_shape_ptr_.data(); }
private:
std::vector<ElementT *> all_data_;
- std::vector<tflite::RuntimeShape> all_shape_;
- std::vector<tflite::RuntimeShape *> all_shape_ptr_;
+ std::vector<luci_interpreter::RuntimeShape> all_shape_;
+ std::vector<luci_interpreter::RuntimeShape *> all_shape_ptr_;
};
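+
+// Usage sketch (illustrative; construction details are elided in this
+// excerpt, and the data()/shapes() accessors are assumed to match the TFLite
+// original), e.g. inside a concatenation kernel:
+//   VectorOfTensors<float, true> all_inputs(/* ... */);
+//   const float *const *input_data = all_inputs.data();
+//   const luci_interpreter::RuntimeShape *const *input_shapes = all_inputs.shapes();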
-#ifndef DIS_QUANT
-void calculateActivationRangeQuantized(Activation activation, const circle::Tensor *output,
- int32_t *activation_min, int32_t *activation_max);
-void calculateActivationRangeQuantized(Activation activation, int32_t output_zero_point,
- float output_scale, DataType data_type,
- int32_t *activation_min, int32_t *activation_max);
-
template <typename T> constexpr bool one_of_types() { return false; }
// Checks if T is equal to one of {U,Other} types
template <typename T, typename U, typename... Other> constexpr bool one_of_types()
{
  return std::is_same<T, U>::value || one_of_types<T, Other...>();
}
void matrixScalarMultiplyAccumulate(const int8_t *matrix, int32_t scalar, int32_t n_row,
int32_t n_col, int32_t *output);
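+
+// Semantics sketch (illustrative, mirroring the TFLite helper of the same
+// name): for each row r of the n_row x n_col int8 matrix,
+//   output[r] += scalar * (sum of the n_col entries of row r)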
-/**
- * Fills activation min and max parameters depending on given data type and activation
- *
- * T is a template parameter, so after optimization this code left with only required if case
- *
- * @tparam T data type of arithmetic operation output tensor
- * @param params tflite params to fill
- * @param activation luci_interpreter::Activation of arithmetic operation
- */
-template <typename T>
-void fillArithmeticActivationRange(tflite::ArithmeticParams &p, Activation act)
-{
- static_assert(one_of_types<T, float, int32_t, int64_t>(), "Unsupported dtype");
-  if (std::is_same<T, float>::value)
-    calculateActivationRange(act, &p.float_activation_min, &p.float_activation_max);
-  if (std::is_same<T, int32_t>::value)
-    calculateActivationRange(act, &p.quantized_activation_min, &p.quantized_activation_max);
-  else
-    calculateActivationRange(act, &p.int64_activation_min, &p.int64_activation_max);
-}
+#ifndef DIS_QUANT
+bool checkedLog2(const float x, int *log2_result);
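+// Semantics sketch for checkedLog2 above (illustrative, mirroring the TFLite
+// helper of the same name): writes round(log2(x)) to *log2_result and returns
+// true only when x is an exact power of two within a small tolerance,
+// e.g. x = 0.25 gives *log2_result = -2 and returns true.
+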
+int calculateInputRadius(int input_integer_bits, int input_left_shift, int total_signed_bits);
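+// calculateInputRadius above bounds the input range representable by a
+// fixed-point approximation; quantized kernels such as Softmax are expected
+// to clamp inputs outside the radius to the saturated output value.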
+
+void calculateActivationRangeQuantized(Activation activation, const circle::Tensor *output,
+ int32_t *activation_min, int32_t *activation_max);
+
+void calculateActivationRangeQuantized(Activation activation, int32_t output_zero_point,
+ float output_scale, DataType data_type,
+ int32_t *activation_min, int32_t *activation_max);
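+
+// Worked example (illustrative): for RELU6 with output_scale = 0.5 and
+// output_zero_point = 0, the quantized clamp range becomes [0, 12], since
+// 6 / 0.5 + 0 = 12 (further clipped to the output data type's limits).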
// Decompose a double multiplier into a Q0.31 int32 representation of its
// significand, and shift representation of its exponent.
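+// Worked example (illustrative): a real multiplier of 0.5 decomposes into
+// significand 1073741824 (= round(0.5 * 2^31)) with shift 0, since
+// 1073741824 * 2^(0 - 31) == 0.5.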