runtime/onert/backend/cpu/ops/SubLayer.cc
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "SubLayer.h"

#include <cker/operation/BinaryArithmeticOps.h>

namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{

void SubLayer::subFloat32()
{
  float output_activation_min = 0, output_activation_max = 0;
  CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
  nnfw::cker::BinaryArithmeticOpParam op_params;
  op_params.float_activation_max = output_activation_max;
  op_params.float_activation_min = output_activation_min;

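  // ProcessBroadcastShapes fills the broadcast fields of op_params and reports whether the two
  // shapes actually require broadcasting, so the cheaper element-wise kernel is used otherwise.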
  const bool need_broadcast =
      nnfw::cker::ProcessBroadcastShapes(getTensorShape(_lhs), getTensorShape(_rhs), &op_params);
  if (need_broadcast)
  {
    nnfw::cker::BroadcastBinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::SUB>(
        op_params, getTensorShape(_lhs), reinterpret_cast<const float *>(_lhs->buffer()),
        getTensorShape(_rhs), reinterpret_cast<const float *>(_rhs->buffer()),
        getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
    return;
  }

  nnfw::cker::BinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::SUB>(
      op_params, getTensorShape(_lhs), reinterpret_cast<const float *>(_lhs->buffer()),
      getTensorShape(_rhs), reinterpret_cast<const float *>(_rhs->buffer()),
      getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
}

void SubLayer::subInt32()
{
  int32_t output_activation_min = 0, output_activation_max = 0;
  CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
  nnfw::cker::BinaryArithmeticOpParam op_params;
  op_params.quantized_activation_max = output_activation_max;
  op_params.quantized_activation_min = output_activation_min;
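  // op_params has no dedicated int32 activation fields, so the (plain int32_t) quantized
  // fields are reused to carry the activation range here.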

  const bool need_broadcast =
      nnfw::cker::ProcessBroadcastShapes(getTensorShape(_lhs), getTensorShape(_rhs), &op_params);
  if (need_broadcast)
  {
    nnfw::cker::BroadcastBinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::SUB>(
        op_params, getTensorShape(_lhs), reinterpret_cast<const int32_t *>(_lhs->buffer()),
        getTensorShape(_rhs), reinterpret_cast<const int32_t *>(_rhs->buffer()),
        getTensorShape(_output), reinterpret_cast<int32_t *>(_output->buffer()));
    return;
  }

  nnfw::cker::BinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::SUB>(
      op_params, getTensorShape(_lhs), reinterpret_cast<const int32_t *>(_lhs->buffer()),
      getTensorShape(_rhs), reinterpret_cast<const int32_t *>(_rhs->buffer()),
      getTensorShape(_output), reinterpret_cast<int32_t *>(_output->buffer()));
}

void SubLayer::subQuant8()
{
  int32_t output_activation_min, output_activation_max;
  CalculateActivationRangeUint8(_activation, _output, &output_activation_min,
                                &output_activation_max);
  nnfw::cker::BinaryArithmeticOpParam op_params;
  op_params.quantized_activation_max = output_activation_max;
  op_params.quantized_activation_min = output_activation_min;
  // Parameters for scaled quantized computation
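  // A left shift of 20 gives the zero-point-adjusted inputs extra integer headroom, so the
  // fixed-point rescaling below keeps precision before the result is scaled back down.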
  op_params.left_shift = 20;
  // Zero-points of input and output tensors
  op_params.input1_offset = -_lhs->data_offset();
  op_params.input2_offset = -_rhs->data_offset();
  op_params.output_offset = _output->data_offset();
  assert((op_params.input1_offset >= -255) && (op_params.input1_offset <= 0));
  assert((op_params.input2_offset >= -255) && (op_params.input2_offset <= 0));
  assert((op_params.output_offset >= 0) && (op_params.output_offset <= 255));

  // Compute normalized scale for _lhs and _rhs values,
  // and represent in 32-bit fixed point
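  // The real-valued result is lhs_scale * (q_lhs - lhs_zp) - rhs_scale * (q_rhs - rhs_zp);
  // dividing each input scale by twice the larger scale keeps the multipliers below 1.0 so
  // they can be expressed as a 32-bit fixed-point significand plus a shift.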
  const double norm_max_scale = 2 * std::max(_lhs->data_scale(), _rhs->data_scale());
  const double real_lhs_scale = _lhs->data_scale() / norm_max_scale;
  const double real_rhs_scale = _rhs->data_scale() / norm_max_scale;
  // output scale is used to normalize final result, so we invert the scale here
  const double real_output_scale =
      norm_max_scale / (_output->data_scale() * (1 << op_params.left_shift));

  // Represent the scales as fixed int32_t multipliers, and int32_t shifts
  QuantizeMultiplier(real_lhs_scale, &op_params.input1_multiplier, &op_params.input1_shift);
  QuantizeMultiplier(real_rhs_scale, &op_params.input2_multiplier, &op_params.input2_shift);
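  // The quantized kernel accumulates the two rescaled inputs, so negating the second input's
  // multiplier turns that accumulation into lhs - rhs.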
  op_params.input2_multiplier *= -1;
  QuantizeMultiplier(real_output_scale, &op_params.output_multiplier, &op_params.output_shift);

  const bool need_broadcast =
      nnfw::cker::ProcessBroadcastShapes(getTensorShape(_lhs), getTensorShape(_rhs), &op_params);
  if (need_broadcast)
  {
    nnfw::cker::BroadcastBinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::SUB>(
        op_params, getTensorShape(_lhs), reinterpret_cast<const uint8_t *>(_lhs->buffer()),
        getTensorShape(_rhs), reinterpret_cast<const uint8_t *>(_rhs->buffer()),
        getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
    return;
  }

  nnfw::cker::BinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::SUB>(
      op_params, getTensorShape(_lhs), reinterpret_cast<const uint8_t *>(_lhs->buffer()),
      getTensorShape(_rhs), reinterpret_cast<const uint8_t *>(_rhs->buffer()),
      getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
}

void SubLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs,
                         const ir::Activation activation, IPortableTensor *output)
{
  _lhs = lhs;
  _rhs = rhs;
  _activation = activation;
  _output = output;
}

void SubLayer::run()
{
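  // Dispatch on the output element type; both inputs are assumed to use the same type.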
  if (_output->data_type() == OperandType::FLOAT32)
  {
    subFloat32();
  }
  else if (_output->data_type() == OperandType::QUANT_UINT8_ASYMM)
  {
    subQuant8();
  }
  else if (_output->data_type() == OperandType::INT32)
  {
    subInt32();
  }
  else
  {
    throw std::runtime_error{"Sub: unsupported data type"};
  }
}

} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert