/* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "kernels/Div.h"
19 #include "kernels/Utils.h"
21 #include <tensorflow/lite/kernels/internal/reference/div.h>
22 #include <tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h>
24 namespace luci_interpreter
29 Div::Div(const Tensor *input1, const Tensor *input2, Tensor *output, const DivParams ¶ms)
30 : KernelWithParams<DivParams>({input1, input2}, {output}, params)
36 LUCI_INTERPRETER_CHECK(input1()->element_type() == input2()->element_type());
37 LUCI_INTERPRETER_CHECK(input1()->element_type() == output()->element_type());
39 output()->resize(calculateShapeForBroadcast(input1()->shape(), input2()->shape()));
42 void Div::execute() const
44 switch (input1()->element_type())
46 case DataType::FLOAT32:
53 throw std::runtime_error("Unsupported type.");
57 void Div::evalFloat() const
59 float activation_min{};
60 float activation_max{};
61 calculateActivationRange(_params.activation, &activation_min, &activation_max);
63 tflite::ArithmeticParams params{};
64 params.float_activation_min = activation_min;
65 params.float_activation_max = activation_max;
66 const bool need_broadcast = tflite::reference_ops::ProcessBroadcastShapes(
67 getTensorShape(input1()), getTensorShape(input2()), ¶ms);
71 tflite::reference_ops::BroadcastDivSlow(
72 params, getTensorShape(input1()), getTensorData<float>(input1()), getTensorShape(input2()),
73 getTensorData<float>(input2()), getTensorShape(output()), getTensorData<float>(output()));
77 tflite::reference_ops::Div(params, getTensorShape(input1()), getTensorData<float>(input1()),
78 getTensorShape(input2()), getTensorData<float>(input2()),
79 getTensorShape(output()), getTensorData<float>(output()));
83 void Div::evalQuantized() const
85 const auto input1_scale = static_cast<double>(input1()->scale());
86 const auto input2_scale = static_cast<double>(input2()->scale());
87 const auto output_scale = static_cast<double>(output()->scale());
89 const double real_output_multiplier = input1_scale / (input2_scale * output_scale);
91 int32_t output_multiplier{};
94 quantizeMultiplier(real_output_multiplier, &output_multiplier, &output_shift);
96 int32_t activation_min{};
97 int32_t activation_max{};
98 calculateActivationRangeQuantized(_params.activation, output(), &activation_min, &activation_max);
100 tflite::ArithmeticParams params{};
102 params.input1_offset = -input1()->zero_point(); // Note the '-'.
103 params.input2_offset = -input2()->zero_point(); // Note the '-'.
104 params.output_offset = output()->zero_point();
105 params.output_multiplier = output_multiplier;
106 params.output_shift = output_shift;
107 params.quantized_activation_min = activation_min;
108 params.quantized_activation_max = activation_max;
110 const bool need_broadcast = tflite::reference_ops::ProcessBroadcastShapes(
111 getTensorShape(input1()), getTensorShape(input2()), ¶ms);
115 tflite::reference_ops::BroadcastDivSlow(
116 params, getTensorShape(input1()), getTensorData<uint8_t>(input1()), getTensorShape(input2()),
117 getTensorData<uint8_t>(input2()), getTensorShape(output()), getTensorData<uint8_t>(output()));
121 tflite::reference_ops::Div(params, getTensorShape(input1()), getTensorData<uint8_t>(input1()),
122 getTensorShape(input2()), getTensorData<uint8_t>(input2()),
123 getTensorShape(output()), getTensorData<uint8_t>(output()));
127 } // namespace kernels
128 } // namespace luci_interpreter