/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #ifndef __ARM_COMPUTE_CLSOFTMAXLAYER_H__
25 #define __ARM_COMPUTE_CLSOFTMAXLAYER_H__
27 #include "arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h"
28 #include "arm_compute/runtime/CL/CLMemoryGroup.h"
29 #include "arm_compute/runtime/CL/CLTensor.h"
30 #include "arm_compute/runtime/IFunction.h"
31 #include "arm_compute/runtime/IMemoryManager.h"
39 /** Basic function to compute a SoftmaxLayer.
41 * Softmax is calculated by :
42 * @f[ out = exp((x - max(x)) * beta) / sum(exp((x - max(x)) * beta)) @f]
44 * This function runs the following kernels:
45 * -# @ref CLLogits1DMaxKernel
46 * -# @ref CLLogits1DShiftExpSumKernel
47 * -# @ref CLLogits1DNormKernel
49 class CLSoftmaxLayer : public IFunction
53 CLSoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
54 /** Set the input and output tensors.
56 * @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32
57 * @param[out] output Destination tensor. Data types supported: same as @p input
58 * @param[in] beta (Optional) A scaling factor for the exponent. Defaults to 1.f
60 void configure(const ICLTensor *input, ICLTensor *output, float beta = 1.0f);
61 /** Static function to check if given info will lead to a valid configuration of @ref CLSoftmaxLayer
63 * @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32
64 * @param[in] output Destination tensor. Data types supported: same as @p input
68 static Status validate(const ITensorInfo *input, const ITensorInfo *output);
70 // Inherited methods overridden:
74 CLMemoryGroup _memory_group;
75 CLLogits1DMaxShiftExpSumKernel _max_shift_exp_sum_kernel;
76 CLLogits1DNormKernel _norm_kernel;
82 #endif /* __ARM_COMPUTE_CLSOFTMAXLAYER_H__ */