// runtime/onert/backend/cpu/ops/MulLayer.cc
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "MulLayer.h"

#include <cker/operation/BinaryArithmeticOps.h>

#include <stdexcept> // for std::runtime_error thrown in run()

namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{

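// Elementwise multiplication for FLOAT32 tensors. The fused activation is applied
// by clamping each result to [output_activation_min, output_activation_max].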
void MulLayer::mulFloat32()
{
  float output_activation_min = 0, output_activation_max = 0;
  CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
  nnfw::cker::BinaryArithmeticOpParam op_params;
  op_params.float_activation_max = output_activation_max;
  op_params.float_activation_min = output_activation_min;

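  // ProcessBroadcastShapes reports whether the two input shapes require
  // broadcasting and, if so, fills the broadcast-related fields of op_params.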
  const bool need_broadcast =
      nnfw::cker::ProcessBroadcastShapes(getTensorShape(_lhs), getTensorShape(_rhs), &op_params);
  if (need_broadcast)
  {
    nnfw::cker::BroadcastBinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::MUL>(
        op_params, getTensorShape(_lhs), reinterpret_cast<const float *>(_lhs->buffer()),
        getTensorShape(_rhs), reinterpret_cast<const float *>(_rhs->buffer()),
        getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
    return;
  }

  nnfw::cker::BinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::MUL>(
      op_params, getTensorShape(_lhs), reinterpret_cast<const float *>(_lhs->buffer()),
      getTensorShape(_rhs), reinterpret_cast<const float *>(_rhs->buffer()),
      getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()));
}

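// Elementwise multiplication for asymmetric QUANT_UINT8 tensors. The product is
// computed in 32-bit arithmetic on zero-point-adjusted values, rescaled with a
// fixed-point multiplier, then offset and clamped to the output range.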
void MulLayer::mulQuant8()
{
  int32_t output_activation_min, output_activation_max;
  CalculateActivationRangeUint8(_activation, _output, &output_activation_min,
                                &output_activation_max);
  nnfw::cker::BinaryArithmeticOpParam op_params;

  op_params.quantized_activation_max = output_activation_max;
  op_params.quantized_activation_min = output_activation_min;
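  // cker adds these offsets to the raw quantized values, so the zero points are
  // negated here to recover (q - zero_point) before multiplying.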
  op_params.input1_offset = -_lhs->data_offset();
  op_params.input2_offset = -_rhs->data_offset();
  op_params.output_offset = _output->data_offset();

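  // Real values relate to quantized ones by r = scale * (q - zero_point), so the
  // product carries a combined scale of s_lhs * s_rhs / s_output. QuantizeMultiplier
  // converts that double into a fixed-point multiplier plus shift so the kernel can
  // rescale using integer arithmetic only.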
  double real_multiplier = _lhs->data_scale() * _rhs->data_scale() / _output->data_scale();
  QuantizeMultiplier(real_multiplier, &op_params.output_multiplier, &op_params.output_shift);

  const bool need_broadcast =
      nnfw::cker::ProcessBroadcastShapes(getTensorShape(_lhs), getTensorShape(_rhs), &op_params);
  if (need_broadcast)
  {
    nnfw::cker::BroadcastBinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::MUL>(
        op_params, getTensorShape(_lhs), reinterpret_cast<const uint8_t *>(_lhs->buffer()),
        getTensorShape(_rhs), reinterpret_cast<const uint8_t *>(_rhs->buffer()),
        getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
    return;
  }

  nnfw::cker::BinaryArithmeticOp<nnfw::cker::BinaryArithmeticOpType::MUL>(
      op_params, getTensorShape(_lhs), reinterpret_cast<const uint8_t *>(_lhs->buffer()),
      getTensorShape(_rhs), reinterpret_cast<const uint8_t *>(_rhs->buffer()),
      getTensorShape(_output), reinterpret_cast<uint8_t *>(_output->buffer()));
}

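// Stores the operand tensors and the fused activation; no computation happens here.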
void MulLayer::configure(const IPortableTensor *lhs, const IPortableTensor *rhs,
                         const ir::Activation activation, IPortableTensor *output)
{
  _lhs = lhs;
  _rhs = rhs;
  _activation = activation;
  _output = output;
}

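// Dispatches on the output element type; only FLOAT32 and asymmetric QUANT_UINT8
// are supported by this kernel.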
void MulLayer::run()
{
  if (_output->data_type() == OperandType::FLOAT32)
  {
    mulFloat32();
  }
  else if (_output->data_type() == OperandType::QUANT_UINT8_ASYMM)
  {
    mulQuant8();
  }
  else
  {
    throw std::runtime_error{"Mul: unsupported data type"};
  }
}

} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert