/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ElementwiseActivationLayer.h"

#include "OperationUtils.h"

#include <cker/operation/ELU.h>
#include <cker/operation/LeakyReLU.h>
#include <cker/operation/Logistic.h>
#include <cker/operation/ReLU.h>
#include <cker/operation/ReLU6.h>
#include <cker/operation/Tanh.h>

#include <algorithm>
#include <cmath>
#include <functional>
#include <limits>
#include <stdexcept>

namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{

ElementwiseActivationLayer::ElementwiseActivationLayer()
    : _input(nullptr), _output(nullptr), _kernel()
{
  // DO NOTHING
}

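// Build a 256-entry lookup table for uint8 asymmetric-quantized Tanh/Logistic.
// Each possible input byte is dequantized (real = input_scale * (q - input_zero_point)),
// passed through the activation, requantized with the output scale and zero-point,
// and clamped to [0, 255]. Evaluation then costs one table lookup per element.
//
// Worked example (hypothetical quantization parameters): with input_scale = 0.1,
// input_zero_point = 128, output_scale = 1/256, and output_zero_point = 0, the
// table entry for q = 138 under kTanh is tanh(0.1 * (138 - 128)) = tanh(1.0)
// ≈ 0.7616, which requantizes to round(0.7616 * 256) + 0 = 195.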
void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type)
{
  const auto input_scale = static_cast<double>(_input->data_scale());
  const auto input_zero_point = static_cast<int32_t>(_input->data_offset());
  const auto output_scale = static_cast<double>(_output->data_scale());
  const auto output_zero_point = static_cast<int32_t>(_output->data_offset());
  const float inverse_scale = 1 / output_scale;
  int32_t maxval = std::numeric_limits<uint8_t>::max();
  int32_t minval = std::numeric_limits<uint8_t>::min();
  for (int32_t val = minval; val <= maxval; ++val)
  {
    const float dequantized = input_scale * (val - input_zero_point);
    float transformed = 0.f;
    if (op_type == ElementwiseActivationType::kTanh)
    {
      transformed = std::tanh(dequantized);
    }
    else if (op_type == ElementwiseActivationType::kLogistic)
    {
      transformed = 1.0f / (1.0f + std::exp(-dequantized));
    }
    else
    {
      throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type");
    }
    const float rescaled = std::round(transformed * inverse_scale);
    const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
    _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval));
  }
}

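// Apply the precomputed table elementwise: each uint8 input value indexes
// directly into _table, so no floating-point math happens at run time.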
void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input,
                                                      IPortableTensor *output)
{
  const int size = MatchingFlatSize(getTensorShape(input), getTensorShape(output));
  const uint8_t *input_data = reinterpret_cast<const uint8_t *>(input->buffer());
  uint8_t *output_data = reinterpret_cast<uint8_t *>(output->buffer());

  for (int i = 0; i < size; ++i)
  {
    output_data[i] = _table[input_data[i]];
  }
}

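// Select the kernel once at configure time based on the activation type and the
// input data type. For kReLU, (alpha, beta) describes the clamp range: only plain
// ReLU (alpha = inf, beta = 0) and ReLU6 (alpha = 6, beta = 0) are supported here.
// For kLeakyReLU, alpha is the slope applied to negative inputs. Quantized uint8
// Logistic/Tanh dispatch to the lookup table built above.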
void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortableTensor *output,
                                           float alpha, float beta,
                                           ElementwiseActivationType op_type)
{
  _input = input;
  _output = output;

  switch (op_type)
  {
    case ElementwiseActivationType::kElu:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::ELU(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
                          getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Elu): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLogistic:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Logistic(getTensorShape(input),
                               reinterpret_cast<const float *>(input->buffer()),
                               getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU(getTensorShape(input),
                             reinterpret_cast<const float *>(input->buffer()),
                             getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
          };
        }
        else if (alpha == 6.f && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU6(getTensorShape(input),
                              reinterpret_cast<const float *>(input->buffer()),
                              reinterpret_cast<float *>(output->buffer()));
          };
        }
        else
        {
          throw std::runtime_error(
              "ElementwiseActivationLayer : This layer supports only ReLU(0-inf) and ReLU6(0-6)");
        }
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kTanh:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Tanh(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
                           getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Tanh): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLeakyReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [alpha](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::LeakyReLU(nnfw::cker::LeakyReluParams{alpha}, getTensorShape(input),
                                reinterpret_cast<const float *>(input->buffer()),
                                getTensorShape(output),
                                reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(LeakyReLU): unsupported data type"};
      }
      break;
    default:
      throw std::runtime_error("ElementwiseActivationLayer: unsupported op type");
  }
}

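// Dispatch to whichever kernel configure() installed.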
void ElementwiseActivationLayer::run() { _kernel(_input, _output); }

} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert