/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ElementwiseActivationLayer.h"

#include "OperationUtils.h"

#include <cker/operation/ELU.h>
#include <cker/operation/LeakyReLU.h>
#include <cker/operation/Logistic.h>
#include <cker/operation/ReLU.h>
#include <cker/operation/ReLU6.h>
#include <cker/operation/Tanh.h>

namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{
ElementwiseActivationLayer::ElementwiseActivationLayer()
  : _input(nullptr), _output(nullptr), _kernel()
{
  // DO NOTHING
}
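// PopulateLookupTable precomputes the activation for quantized tensors. An
// asymmetric uint8 tensor has only 256 possible input values, so each table
// entry is built by dequantizing, applying the activation, and requantizing:
//   real  = input_scale * (q_in - input_zero_point)
//   q_out = clamp(round(f(real) / output_scale) + output_zero_point, 0, 255)
// where f is std::tanh or the logistic sigmoid 1 / (1 + exp(-x)).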
void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type)
{
  const auto input_scale = static_cast<double>(_input->data_scale());
  const auto input_zero_point = static_cast<int32_t>(_input->data_offset());
  const auto output_scale = static_cast<double>(_output->data_scale());
  const auto output_zero_point = static_cast<int32_t>(_output->data_offset());
  const float inverse_scale = 1 / output_scale;
  int32_t maxval = std::numeric_limits<uint8_t>::max();
  int32_t minval = std::numeric_limits<uint8_t>::min();
  for (int32_t val = minval; val <= maxval; ++val)
  {
    const float dequantized = input_scale * (val - input_zero_point);
    float transformed = 0.f;
    if (op_type == ElementwiseActivationType::kTanh)
    {
      transformed = std::tanh(dequantized);
    }
    else if (op_type == ElementwiseActivationType::kLogistic)
    {
      transformed = 1.0f / (1.0f + std::exp(-dequantized));
    }
    else
    {
      throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type");
    }
    const float rescaled = std::round(transformed * inverse_scale);
    const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
    _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval));
  }
}
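// EvalUsingLookupTable replaces the per-element transcendental evaluation with
// a single table lookup per element; input and output must have the same flat
// size (checked by MatchingFlatSize).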
void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input,
                                                      IPortableTensor *output)
{
  const int size = MatchingFlatSize(getTensorShape(input), getTensorShape(output));
  const uint8_t *input_data = reinterpret_cast<const uint8_t *>(input->buffer());
  uint8_t *output_data = reinterpret_cast<uint8_t *>(output->buffer());

  for (int i = 0; i < size; ++i)
  {
    output_data[i] = _table[input_data[i]];
  }
}
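// configure() resolves the kernel once, based on op type and data type, so
// that run() is a single indirect call. For ReLU, alpha selects the upper
// clamp (infinity for plain ReLU, 6 for ReLU6) and beta is expected to be 0.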
void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortableTensor *output,
                                           float alpha, float beta,
                                           ElementwiseActivationType op_type)
{
  _input = input;
  _output = output;

  switch (op_type)
  {
    case ElementwiseActivationType::kElu:
      if (input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::ELU(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
                          getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Elu): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLogistic:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Logistic(getTensorShape(input),
                               reinterpret_cast<const float *>(input->buffer()),
                               getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        // alpha encodes the upper clamp: infinity selects plain ReLU, 6 selects ReLU6.
        if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU(getTensorShape(input),
                             reinterpret_cast<const float *>(input->buffer()),
                             getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
          };
        }
        else if (alpha == 6.f && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU6(getTensorShape(input),
                              reinterpret_cast<const float *>(input->buffer()),
                              reinterpret_cast<float *>(output->buffer()));
          };
        }
        else
        {
          throw std::runtime_error(
            "ElementwiseActivationLayer : This layer supports only ReLU(0-inf) and ReLU6(0-6)");
        }
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kTanh:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Tanh(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
                           getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Tanh): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLeakyReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [alpha](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::LeakyReLU(nnfw::cker::LeakyReluParams{alpha}, getTensorShape(input),
                                reinterpret_cast<const float *>(input->buffer()),
                                getTensorShape(output),
                                reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(LeakyReLU): unsupported data type"};
      }
      break;
    default:
      throw std::runtime_error("ElementwiseActivationLayer: unsupported op type");
  }
}
void ElementwiseActivationLayer::run() { _kernel(_input, _output); }
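// Typical call sequence (a sketch; how the backend's kernel generator wires up
// the tensors is assumed, not shown here):
//   layer->configure(input, output, /*alpha=*/std::numeric_limits<float>::infinity(),
//                    /*beta=*/0.f, ElementwiseActivationType::kReLU);
//   layer->run(); // once per graph execution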
} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert