/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ElementwiseActivationLayer.h"

#include "OperationUtils.h"

#include <cker/operation/ELU.h>
#include <cker/operation/LeakyReLU.h>
#include <cker/operation/Logistic.h>
#include <cker/operation/ReLU.h>
#include <cker/operation/ReLU6.h>
#include <cker/operation/Tanh.h>

#include <algorithm>
#include <cmath>
#include <functional>
#include <limits>
#include <stdexcept>

namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{

ElementwiseActivationLayer::ElementwiseActivationLayer()
  : _input(nullptr), _output(nullptr), _kernel()
{
  // DO NOTHING
}

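// For asymmetric uint8 quantized tensors, precompute a 256-entry lookup table: each
// possible input byte is dequantized with the input scale/zero-point, passed through
// the activation (tanh or logistic), then requantized with the output scale/zero-point.
// run() then reduces to a per-element table lookup.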
void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type)
{
  const auto input_scale = static_cast<double>(_input->data_scale());
  const auto input_zero_point = static_cast<int32_t>(_input->data_zero_point());
  const auto output_scale = static_cast<double>(_output->data_scale());
  const auto output_zero_point = static_cast<int32_t>(_output->data_zero_point());
  const float inverse_scale = 1 / output_scale;
  int32_t maxval = std::numeric_limits<uint8_t>::max();
  int32_t minval = std::numeric_limits<uint8_t>::min();
  for (int32_t val = minval; val <= maxval; ++val)
  {
    const float dequantized = input_scale * (val - input_zero_point);
    float transformed = 0.f;
    if (op_type == ElementwiseActivationType::kTanh)
    {
      transformed = std::tanh(dequantized);
    }
    else if (op_type == ElementwiseActivationType::kLogistic)
    {
      transformed = 1.0f / (1.0f + std::exp(-dequantized));
    }
    else
    {
      throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type");
    }
    const float rescaled = std::round(transformed * inverse_scale);
    const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
    _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval));
  }
}

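// Apply the precomputed table element-wise: each uint8 input value indexes directly
// into _table to produce the quantized output value.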
void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input,
                                                      IPortableTensor *output)
{
  const int size = MatchingFlatSize(getShape(input), getShape(output));
  const uint8_t *input_data = getBuffer<uint8_t>(input);
  uint8_t *output_data = getBuffer<uint8_t>(output);

  for (int i = 0; i < size; ++i)
  {
    output_data[i] = _table[input_data[i]];
  }
}

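// Bind the kernel that run() will execute. The kernel is chosen from the activation
// type and the input data type: float tensors dispatch to the corresponding cker
// kernel, while uint8 Logistic/Tanh use the lookup table populated above.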
void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortableTensor *output,
                                           float alpha, float beta,
                                           ElementwiseActivationType op_type)
{
  _input = input;
  _output = output;

  switch (op_type)
  {
    case ElementwiseActivationType::kElu:
      if (input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::ELU(getShape(input), getBuffer<float>(input), getShape(output),
                          getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Elu): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLogistic:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Logistic(getShape(input), getBuffer<float>(input), getShape(output),
                               getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        // Only plain ReLU (alpha = inf, beta = 0) and ReLU6 (alpha = 6, beta = 0) are handled.
        if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU(getShape(input), getBuffer<float>(input), getShape(output),
                             getBuffer<float>(output));
          };
        }
        else if (alpha == 6.f && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU6(getShape(input), getBuffer<float>(input), getBuffer<float>(output));
          };
        }
        else
        {
          throw std::runtime_error(
            "ElementwiseActivationLayer : This layer supports only ReLU(0-inf) and ReLU6(0-6)");
        }
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kTanh:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Tanh(getShape(input), getBuffer<float>(input), getShape(output),
                           getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Tanh): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLeakyReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [alpha](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::LeakyReLU(nnfw::cker::LeakyReluParams{alpha}, getShape(input),
                                getBuffer<float>(input), getShape(output),
                                getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(LeakyReLU): unsupported data type"};
      }
      break;
    default:
      throw std::runtime_error("ElementwiseActivationLayer: unsupported op type");
  }
}

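// Execute the kernel bound in configure() on the configured input/output tensors.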
void ElementwiseActivationLayer::run() { _kernel(_input, _output); }

} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert