Imported Upstream version 1.25.0
[platform/core/ml/nnfw.git] onert-micro/luci-interpreter/src/kernels/Utils.cpp
index 0810b82..35ab821 100644
@@ -26,6 +26,26 @@ namespace luci_interpreter
 namespace kernels
 {
 
+// Returns the tensor's shape, preferring a dynamic shape registered with the
+// runtime graph (when dynamic shapes are enabled) over the static shape stored
+// in the circle model.
+luci_interpreter::RuntimeShape getTensorRuntimeShape(const circle::Tensor *circle_tensor,
+                                                     BaseRuntimeGraph *runtime_graph)
+{
+  luci_interpreter::RuntimeShape input_shape = getTensorShape(circle_tensor);
+
+#ifndef DIS_DYN_SHAPES
+  auto *dynamic_shape_vector = runtime_graph->getDynamicShapeTensor(circle_tensor);
+  if (dynamic_shape_vector != nullptr)
+  {
+    input_shape.resize(dynamic_shape_vector->dimensionsCount());
+
+    for (int n = 0; n < dynamic_shape_vector->dimensionsCount(); ++n)
+    {
+      input_shape.setDim(n, dynamic_shape_vector->dims(n));
+    }
+  }
+#endif // DIS_DYN_SHAPES
+  return input_shape;
+}
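+
+// A minimal usage sketch (hypothetical call site; `input` and `runtime_graph`
+// stand in for a kernel's tensor and graph handles and are assumptions, not
+// part of this file):
+//
+//   const luci_interpreter::RuntimeShape shape =
+//     getTensorRuntimeShape(input, runtime_graph);
+//   int32_t flat_size = 1;
+//   for (int i = 0; i < shape.dimensionsCount(); ++i)
+//     flat_size *= shape.dims(i);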
+
 template <typename T>
 void calculateActivationRange(Activation activation, T *activation_min, T *activation_max)
 {
@@ -74,6 +94,26 @@ template void calculateActivationRange(Activation activation, int64_t *activation_min,
                                        int64_t *activation_max);
 
 #ifndef DIS_QUANT
+// Returns true when log2(x) is within 1e-3 of an integer, i.e. when x is
+// approximately an exact power of two, and stores round(log2(x)) in
+// *log2_result; the stored value is only meaningful when true is returned.
+bool checkedLog2(const float x, int *log2_result)
+{
+  const float x_log2 = std::log(x) * (1.0f / std::log(2.0f));
+  const float x_log2_rounded = std::round(x_log2);
+  const float x_log2_fracpart = x_log2 - x_log2_rounded;
+
+  *log2_result = static_cast<int>(x_log2_rounded);
+  return std::abs(x_log2_fracpart) < 1e-3f;
+}
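+
+// Worked example (values follow directly from the math above): a power-of-two
+// scale such as x = 0.25f gives x_log2 = -2.0 with a zero fractional part, so
+// *log2_result = -2 and the function returns true; x = 0.3f gives
+// x_log2 ~ -1.737 with |fracpart| ~ 0.263 > 1e-3, so it returns false.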
+
+// Computes the input "radius": the largest input magnitude whose value, after
+// the fixed-point rescaling implied by the shifts below, still fits in
+// total_signed_bits.
+int calculateInputRadius(int input_integer_bits, int input_left_shift, int total_signed_bits)
+{
+  const double max_input_rescaled = 1.0 * ((1 << input_integer_bits) - 1) *
+                                    (1LL << (total_signed_bits - input_integer_bits)) /
+                                    (1LL << input_left_shift);
+  // Tighten the bound using floor: if the exact value were used, scaling the
+  // difference could land exactly on the maximum, so the returned radius must
+  // have strictly lower magnitude.
+  return static_cast<int>(std::floor(max_input_rescaled));
+}
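+
+// Worked example (plain arithmetic, no new API assumed): with
+// input_integer_bits = 4, input_left_shift = 20 and total_signed_bits = 31,
+// max_input_rescaled = 15 * 2^27 / 2^20 = 1920, so the returned radius is 1920.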
 
 static void calculateActivationRangeQuantizedImpl(Activation activation, int32_t qmin, int32_t qmax,
                                                   int32_t zero_point, float scale,
@@ -206,13 +246,13 @@ void quantizeMultiplierSmallerThanOneExp(double double_multiplier, int32_t *quantized_multiplier,
 }
 #endif
 
-tflite::RuntimeShape calculateShapeForBroadcast(const circle::Tensor *input1,
-                                                const circle::Tensor *input2)
+luci_interpreter::RuntimeShape calculateShapeForBroadcast(const circle::Tensor *input1,
+                                                          const circle::Tensor *input2)
 {
   const int num_input1_dims = Tensor::num_dims(input1);
   const int num_input2_dims = Tensor::num_dims(input2);
   const int num_out_dims = std::max(num_input1_dims, num_input2_dims);
-  tflite::RuntimeShape output_shape(num_out_dims);
+  luci_interpreter::RuntimeShape output_shape(num_out_dims);
 
   for (int i = 0; i < num_out_dims; ++i)
   {
@@ -225,7 +265,7 @@ tflite::RuntimeShape calculateShapeForBroadcast(const circle::Tensor *input1,
     bool can_broadcast = input1_dim == 1 || input2_dim == 1;
     LUCI_INTERPRETER_CHECK(!need_broadcast || can_broadcast);
 
-    output_shape.SetDim(num_out_dims - i - 1, std::max(input1_dim, input2_dim));
+    output_shape.setDim(num_out_dims - i - 1, std::max(input1_dim, input2_dim));
   }
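+  // Example: broadcasting a [2, 1, 4] shape against [3, 1] walks the trailing
+  // dimensions right-to-left and yields the output shape [2, 3, 4]; mismatched
+  // dimensions where neither side is 1 fail the LUCI_INTERPRETER_CHECK above.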
 
   return output_shape;