/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "ElementwiseActivationLayer.h"
19 #include "OperationUtils.h"
21 #include <cker/operation/Logistic.h>
22 #include <cker/operation/ReLU.h>
23 #include <cker/operation/ReLU6.h>
24 #include <cker/operation/Tanh.h>
ElementwiseActivationLayer::ElementwiseActivationLayer()
  : _input(nullptr), _output(nullptr), _kernel()
{
  // DO NOTHING
}
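
// Build a 256-entry lookup table for QUANT_UINT8_ASYMM tensors: every possible
// uint8 input value is dequantized, passed through the activation (tanh or
// logistic), then requantized with the output tensor's scale and zero point.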
void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type)
{
  const auto input_scale = static_cast<double>(_input->data_scale());
  const auto input_zero_point = static_cast<int32_t>(_input->data_offset());
  const auto output_scale = static_cast<double>(_output->data_scale());
  const auto output_zero_point = static_cast<int32_t>(_output->data_offset());
  const float inverse_scale = 1 / output_scale;
  int32_t maxval = std::numeric_limits<uint8_t>::max();
  int32_t minval = std::numeric_limits<uint8_t>::min();
  for (int32_t val = minval; val <= maxval; ++val)
  {
    const float dequantized = input_scale * (val - input_zero_point);
    float transformed = 0.f;
    if (op_type == ElementwiseActivationType::kTanh)
    {
      transformed = std::tanh(dequantized);
    }
    else if (op_type == ElementwiseActivationType::kLogistic)
    {
      transformed = 1.0f / (1.0f + std::exp(-dequantized));
    }
    else
    {
      throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type");
    }
    const float rescaled = std::round(transformed * inverse_scale);
    const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
    // Clamp to the uint8 range before storing in the table.
    _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval));
  }
}
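
// Apply the precomputed table elementwise; input and output must have
// matching flat sizes.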
void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input,
                                                      IPortableTensor *output)
{
  const int size = MatchingFlatSize(getTensorShape(input), getTensorShape(output));
  const uint8_t *input_data = reinterpret_cast<const uint8_t *>(input->buffer());
  uint8_t *output_data = reinterpret_cast<uint8_t *>(output->buffer());

  for (int i = 0; i < size; ++i)
  {
    output_data[i] = _table[input_data[i]];
  }
}
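
// Bind the kernel once at configure time so run() is a single indirect call:
// quantized tensors go through the lookup table, float tensors dispatch to the
// corresponding cker kernel.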
void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortableTensor *output,
                                           float alpha, float beta,
                                           ElementwiseActivationType op_type)
{
  _input = input;
  _output = output;

  switch (op_type)
  {
    case ElementwiseActivationType::kLogistic:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Logistic(getTensorShape(input),
                               reinterpret_cast<const float *>(input->buffer()),
                               getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
      }
      break;
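    // alpha/beta encode the clamp bounds: alpha is the upper bound (infinity
    // for plain ReLU, 6 for ReLU6) and beta the lower bound. Only the two
    // combinations below map to cker kernels.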
    case ElementwiseActivationType::kReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU(getTensorShape(input),
                             reinterpret_cast<const float *>(input->buffer()),
                             getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
          };
        }
        else if (alpha == 6.f && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU6(getTensorShape(input),
                              reinterpret_cast<const float *>(input->buffer()),
                              reinterpret_cast<float *>(output->buffer()));
          };
        }
        else
        {
          throw std::runtime_error(
            "ElementwiseActivationLayer : This layer supports only ReLU(0-inf) and ReLU6(0-6)");
        }
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kTanh:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Tanh(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
                           getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Tanh): unsupported data type"};
      }
      break;
    default:
      throw std::runtime_error("ElementwiseActivationLayer: unsupported op type");
  }
}
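
// configure() must have been called first so that _kernel is bound; run()
// simply forwards the configured tensors to the selected kernel.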
void ElementwiseActivationLayer::run() { _kernel(_input, _output); }
} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert