/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "SoftMaxLayer.h"
19 #include "OperationUtils.h"
21 #include <cker/operation/SoftMax.h>
32 SoftMaxLayer::SoftMaxLayer() : _input(nullptr), _output(nullptr), _beta(0.0)
37 void SoftMaxLayer::softmaxFloat32()
39 if (getNumberOfDimensions(_input) == 1)
41 uint32_t input_size = getNumberOfElements(_input);
42 nnfw::cker::Softmax(reinterpret_cast<const float *>(_input->buffer()), input_size, 1, _beta,
43 reinterpret_cast<float *>(_output->buffer()));
45 else if (getNumberOfDimensions(_input) == 2)
47 uint32_t batch_size = getSizeOfDimension(_input, 0);
49 throw std::runtime_error("batch_size should not be 0");
51 uint32_t input_size = getNumberOfElements(_input) / batch_size;
52 nnfw::cker::Softmax(reinterpret_cast<const float *>(_input->buffer()), input_size, batch_size,
53 _beta, reinterpret_cast<float *>(_output->buffer()));
55 else if (getNumberOfDimensions(_input) == 4)
57 nnfw::cker::SoftmaxParams op_params;
58 op_params.beta = _beta;
59 nnfw::cker::Softmax(op_params, getTensorShape(_input),
60 reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output),
61 reinterpret_cast<float *>(_output->buffer()));
65 throw std::runtime_error{"only 1D, 2D and 4D tensors supported"};
69 void SoftMaxLayer::softmaxQuant8()
71 nnfw::cker::Shape descrIn4D(4);
73 if (getNumberOfDimensions(_input) == 2)
75 auto batch_size = getSizeOfDimension(_input, 0);
77 throw std::runtime_error("batch_size should not be 0");
79 auto input_size = getNumberOfElements(_input) / batch_size;
80 descrIn4D.SetDim(0, batch_size);
81 descrIn4D.SetDim(1, 1);
82 descrIn4D.SetDim(2, 1);
83 descrIn4D.SetDim(3, input_size);
85 else if (getNumberOfDimensions(_input) == 4)
87 descrIn4D.SetDim(0, _input->dimension(0));
88 descrIn4D.SetDim(1, _input->dimension(1));
89 descrIn4D.SetDim(2, _input->dimension(2));
90 descrIn4D.SetDim(3, _input->dimension(3));
94 throw std::runtime_error{"only 2D and 4D tensors supported"};
96 if (_output->data_offset() != 0 || _output->data_scale() != 1.f / 256)
98 throw std::runtime_error{"incorrect scale / offset for output"};
100 static const int32_t kScaledDiffIntegerBits = 5;
101 const double input_beta_real_multiplier = std::min(
102 1.0 * _beta * _input->data_scale() * (1 << (31 - kScaledDiffIntegerBits)), (1ll << 31) - 1.0);
103 int32_t input_multiplier = 0;
104 int32_t input_left_shift = 0;
105 QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier, &input_multiplier,
107 float diff_min = -1.0f * CalculateInputRadius(kScaledDiffIntegerBits, input_left_shift);
109 nnfw::cker::SoftmaxParams op_params;
110 op_params.input_multiplier = input_multiplier;
111 op_params.input_left_shift = input_left_shift;
112 op_params.diff_min = diff_min;
113 nnfw::cker::Softmax(op_params, descrIn4D, reinterpret_cast<const uint8_t *>(_input->buffer()),
114 descrIn4D, reinterpret_cast<uint8_t *>(_output->buffer()));
117 void SoftMaxLayer::configure(const IPortableTensor *input, const float beta,
118 IPortableTensor *output)
125 void SoftMaxLayer::run()
127 if (_input->data_type() == OperandType::FLOAT32)
131 else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
137 throw std::runtime_error{"SoftMax: unsupported data type"};
143 } // namespace backend