/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ElementwiseActivationLayer.h"

#include "OperationUtils.h"

#include <cker/operation/ELU.h>
#include <cker/operation/LeakyReLU.h>
#include <cker/operation/Logistic.h>
#include <cker/operation/ReLU.h>
#include <cker/operation/ReLU6.h>
#include <cker/operation/Tanh.h>

#include <cmath>
#include <functional>
#include <limits>
#include <stdexcept>
37 ElementwiseActivationLayer::ElementwiseActivationLayer()
38 : _input(nullptr), _output(nullptr), _kernel()
43 void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type)
45 const auto input_scale = static_cast<double>(_input->data_scale());
46 const auto input_zero_point = static_cast<int32_t>(_input->data_zero_point());
47 const auto output_scale = static_cast<double>(_output->data_scale());
48 const auto output_zero_point = static_cast<int32_t>(_output->data_zero_point());
49 const float inverse_scale = 1 / output_scale;
50 int32_t maxval = std::numeric_limits<uint8_t>::max();
51 int32_t minval = std::numeric_limits<uint8_t>::min();
52 for (int32_t val = minval; val <= maxval; ++val)
54 const float dequantized = input_scale * (val - input_zero_point);
55 float transformed = 0.f;
56 if (op_type == ElementwiseActivationType::kTanh)
58 transformed = std::tanh(dequantized);
60 else if (op_type == ElementwiseActivationType::kLogistic)
62 transformed = 1.0f / (1.0f + std::exp(-dequantized));
66 throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type");
68 const float rescaled = std::round(transformed * inverse_scale);
69 const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
70 _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval));
74 void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input,
75 IPortableTensor *output)
77 const int size = MatchingFlatSize(getShape(input), getShape(output));
78 const uint8_t *input_data = getBuffer<uint8_t>(input);
79 uint8_t *output_data = getBuffer<uint8_t>(output);
81 for (int i = 0; i < size; ++i)
83 output_data[i] = _table[input_data[i]];
87 void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortableTensor *output,
88 float alpha, float beta,
89 ElementwiseActivationType op_type)
96 case ElementwiseActivationType::kElu:
97 if (input->data_type() == OperandType::FLOAT32)
99 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
100 nnfw::cker::ELU(getShape(input), getBuffer<float>(input), getShape(output),
101 getBuffer<float>(output));
106 throw std::runtime_error{"ElementwiseActivationLayer(Elu): unsupported data type"};
109 case ElementwiseActivationType::kLogistic:
110 if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
112 PopulateLookupTable(op_type);
113 _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
114 std::placeholders::_1, std::placeholders::_2);
116 else if (_input->data_type() == OperandType::FLOAT32)
118 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
119 nnfw::cker::Logistic(getShape(input), getBuffer<float>(input), getShape(output),
120 getBuffer<float>(output));
125 throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
128 case ElementwiseActivationType::kReLU:
129 if (_input->data_type() == OperandType::FLOAT32)
131 if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
133 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
134 nnfw::cker::ReLU(getShape(input), getBuffer<float>(input), getShape(output),
135 getBuffer<float>(output));
138 else if (alpha == 6.f && beta == 0.f)
140 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
141 nnfw::cker::ReLU6(getShape(input), getBuffer<float>(input), getBuffer<float>(output));
146 throw std::runtime_error(
147 "ElementwiseActivationLayer : This layer suppports only ReLU(0-inf) and ReLU6(0-6)");
152 throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"};
155 case ElementwiseActivationType::kTanh:
156 if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
158 PopulateLookupTable(op_type);
159 _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
160 std::placeholders::_1, std::placeholders::_2);
162 else if (_input->data_type() == OperandType::FLOAT32)
164 _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
165 nnfw::cker::Tanh(getShape(input), getBuffer<float>(input), getShape(output),
166 getBuffer<float>(output));
171 throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
174 case ElementwiseActivationType::kLeakyReLU:
175 if (_input->data_type() == OperandType::FLOAT32)
177 _kernel = [alpha](const IPortableTensor *input, IPortableTensor *output) {
178 nnfw::cker::LeakyReLU(nnfw::cker::LeakyReluParams{alpha}, getShape(input),
179 getBuffer<float>(input), getShape(output),
180 getBuffer<float>(output));
185 throw std::runtime_error{"ElementwiseActivationLayer(LeakyReLU): unsupported data type"};
189 throw std::runtime_error("ElementwiseActivationLayer: unsupported op type");
193 void ElementwiseActivationLayer::run() { _kernel(_input, _output); }
197 } // namespace backend