/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEGEMMCONVOLUTIONLAYER_H__
#define __ARM_COMPUTE_NEGEMMCONVOLUTIONLAYER_H__

#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/core/NEON/kernels/NECol2ImKernel.h"
#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "arm_compute/core/NEON/kernels/NEIm2ColKernel.h"
#include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
class ITensor;
/** Function to reshape and perform 1xW transposition on the weights. This function calls the following kernels:
 * -# @ref NEWeightsReshapeKernel
 * -# @ref NEGEMMTranspose1xWKernel (executed only in case GEMM is required for the operation)
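 *
 * A minimal usage sketch, assuming @p weights, @p biases and @p weights_reshaped are
 * already allocated and initialised Tensor objects (the names are illustrative only):
 * @code
 * NEConvolutionLayerReshapeWeights reshape_weights;
 * // Reshape the weights and apply the 1xW transposition required by GEMM
 * reshape_weights.configure(&weights, &biases, &weights_reshaped, true);
 * reshape_weights.run();
 * @endcode
 */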
class NEConvolutionLayerReshapeWeights : public IFunction
{
public:
    /** Constructor */
    NEConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Set the input and output tensors.
     *
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QS8/QASYMM8/QS16/F32.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[out] output       Destination tensor. Data types supported: Same as @p weights.
     * @param[in]  transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
     */
    void configure(const ITensor *weights, const ITensor *biases, ITensor *output, bool transpose1xW);
    /** Static function to check if the given info will lead to a valid configuration of @ref NEConvolutionLayerReshapeWeights
     *
     * @param[in] weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in] biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
     * @param[in] output       Destination tensor. Data types supported: Same as @p weights.
     * @param[in] transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
     *
     * @return an error status
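     *
     * A minimal pre-flight sketch, reusing the illustrative objects from the class-level example:
     * @code
     * Status status = NEConvolutionLayerReshapeWeights::validate(weights.info(), biases.info(), weights_reshaped.info(), true);
     * if(status.error_code() == ErrorCode::OK)
     * {
     *     reshape_weights.configure(&weights, &biases, &weights_reshaped, true);
     * }
     * @endcode
     */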
    static Status validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose1xW);

    // Inherited methods overridden:
    void run() override;

private:
    MemoryGroup              _memory_group;
    NEWeightsReshapeKernel   _weights_reshape_kernel;
    NEGEMMTranspose1xWKernel _weights_transposed_kernel;
    Tensor                   _weights_reshaped;
    bool                     _transpose1xW;
};
/** Basic function to compute a convolution layer. This function calls the following NEON kernels:
 * -# @ref NEWeightsReshapeKernel (executed only once for each configuration)
 * -# @ref NEIm2ColKernel
 * -# @ref NEGEMMInterleave4x4Kernel (executed only in case GEMM is required for the operation)
 * -# @ref NEGEMMMatrixMultiplyKernel or @ref NEGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
 * -# @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint (if quantized asymmetric)
 * -# @ref NECol2ImKernel
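 *
 * A minimal usage sketch, assuming @p src, @p weights, @p biases and @p dst are
 * already allocated and initialised Tensor objects (names and shapes are illustrative only):
 * @code
 * NEGEMMConvolutionLayer conv;
 * // 3x3 convolution with stride 1 and 1 pixel of padding on each border
 * conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 1, 1));
 * conv.run();
 * @endcode
 */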
class NEGEMMConvolutionLayer : public IFunction
{
public:
    /** Constructor */
    NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager = nullptr);

    /** Set the input and output tensors.
     *
     * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                          while every optional dimension from 4 and above represent a batch of inputs.
     *                          Data types supported: QS8/QASYMM8/QS16/F32.
     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                          Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
     * @param[out] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                          Data types supported: Same as @p input.
     * @param[in]  conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel and, if the layer is not fully connected,
     *                          also transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo());
    /** Static function to check if the given info will lead to a valid configuration of @ref NEGEMMConvolutionLayer
     *
     * @param[in] input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
     *                         while every optional dimension from 4 and above represent a batch of inputs.
     *                         Data types supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in] weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
     * @param[in] biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
     *                         Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
     * @param[in] output       Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
     *                         Data types supported: Same as @p input.
     * @param[in] conv_info    Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in] weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel and, if the layer is not fully connected,
     *                         also transposed with NEGEMMTranspose1xWKernel. Data type supported: Same as @p input.
     *
     * @return an error status
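     *
     * A minimal pre-flight sketch, reusing the illustrative tensors from the class-level example:
     * @code
     * Status status = NEGEMMConvolutionLayer::validate(src.info(), weights.info(), biases.info(), dst.info(), PadStrideInfo(1, 1, 1, 1));
     * @endcode
     */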
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const WeightsInfo &weights_info = WeightsInfo());

    // Inherited methods overridden:
    void run() override;

private:
    /** Configures the appropriate matrix multiply routine
     *
     * @param[in]  input          Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
     * @param[in]  weights        Weights tensor. Data type supported: Same as @p input.
     * @param[out] output         Output tensor. Data types supported: Same as @p input,
     *                            except for input of QASYMM8 type where output should be of S32 type.
     * @param[in]  is_interleaved (Optional) True if input0 and input1 have been reshaped respectively using @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel
     * @param[in]  reshape_info   (Optional) GEMM reshape info. If is_interleaved is true, this object must contain the information needed to understand how matrix A and matrix B have been reshaped
     */
    void configure_mm(const ITensor *input, const ITensor *weights, ITensor *output, bool is_interleaved, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo());
    /** Prepare the appropriate assembly optimized kernel
     *
     * @param[in] ci CPU information
     * @param[in] M  M parameter of matrix multiplication
     * @param[in] N  N parameter of matrix multiplication
     * @param[in] K  K parameter of matrix multiplication
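     *
     * @note For the im2col-based GEMM convolution implemented here, M, N and K are typically expected to map onto the
     *       convolution shapes as M = output_width * output_height, N = OFM and K = kernel_x * kernel_y * IFM
     *       (a general rule of thumb for GEMM-based convolution, stated here as an assumption rather than a guarantee).
     */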
    void configure_asm_mm(const struct CPUInfo &ci, int M, int N, int K);

    MemoryGroup                                         _memory_group;
    NEIm2ColKernel                                      _input_im2col_kernel;
    NEGEMMInterleave4x4Kernel                           _input_interleave_kernel;
    NEConvolutionLayerReshapeWeights                    _reshape_weights;
    NEGEMMMatrixMultiplyKernel                          _mm_kernel;
    std::unique_ptr<NEGEMMAssemblyBaseKernel>           _mm_optimised_kernel;
    NEGEMMLowpMatrixMultiplyCore                        _mm_gemmlowp;
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage;
    NECol2ImKernel                                      _output_col2im_kernel;

    Tensor _input_im2col_reshaped;
    Tensor _input_interleaved_reshaped;
    Tensor _weights_reshaped;

    bool _is_fully_connected_convolution;
    bool _are_weights_reshaped;
    bool _is_interleaved;
};
}
#endif /* __ARM_COMPUTE_NEGEMMCONVOLUTIONLAYER_H__ */