[platform/core/ml/nnfw.git] compute/ARMComputeEx/src/runtime/NEON/functions/NEFullyConnectedHybridLayer.cpp
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/runtime/NEON/functions/NEFullyConnectedHybridLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include <algorithm>
#include <cmath>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
  ARM_COMPUTE_RETURN_ON_ERROR(
      NEGEMMLowpMatrixMultiplyCore::validate(&input, &weights, nullptr, &output));

  return Status{};
}
} // namespace

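// Reshaping the weights for the hybrid fully connected layer is a plain transpose,
// delegated to NETransposeKernel.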
void NEFullyConnectedHybridLayerReshapeWeights::configure(const ITensor *input, ITensor *output)
{
  auto k = support::cpp14::make_unique<NETransposeKernel>();
  k->configure(input, output);
  _kernel = std::move(k);
}

Status NEFullyConnectedHybridLayerReshapeWeights::validate(const ITensorInfo *input,
                                                           const ITensorInfo *output)
{
  return NETransposeKernel::validate(input, output);
}

NEFullyConnectedHybridLayer::NEFullyConnectedHybridLayer(
    std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _reshape_weights_function(), _quant_input_kernel(),
      _mm_gemmlowp(), _accumulate_biases_kernel(), _reshape_weights_output(), _quantized_input(),
      _scale_factor(), _original_weights(nullptr), _are_weights_reshaped(false),
      _accumulate_biases(false), _is_prepared(false)
{
}

void NEFullyConnectedHybridLayer::configure_mm(const ITensor *input, const ITensor *weights,
                                               ITensor *output)
{
  ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

  // Configure gemmlowp function
  _mm_gemmlowp.configure(input, weights, nullptr, output);
}

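// NEFullyConnectedHybridLayer implements a hybrid fully connected layer: a float (F16/F32)
// input is quantized on the fly to QASYMM8_SIGNED with one scale factor per row, multiplied
// against the QASYMM8_SIGNED weights via NEGEMMLowpMatrixMultiplyCore (S32 accumulation),
// and the S32 result is scaled back to float using the per-row scale factors and the weights'
// quantization scale. Biases, if provided, are accumulated onto the float output.
//
// Minimal usage sketch (tensor setup omitted; the tensor names are illustrative only):
//
//   NEFullyConnectedHybridLayer fc;
//   fc.configure(&input /* F32 */, &weights /* QASYMM8_SIGNED */, &biases, &output,
//                FullyConnectedLayerInfo{});
//   fc.run();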
void NEFullyConnectedHybridLayer::configure(const ITensor *input, const ITensor *weights,
                                            const ITensor *biases, ITensor *output,
                                            FullyConnectedLayerInfo fc_info)
{
  ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

  // Perform validate step
  ARM_COMPUTE_ERROR_THROW_ON(NEFullyConnectedHybridLayer::validate(
      input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(),
      fc_info));

  _are_weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
  _accumulate_biases = false;
  _original_weights = weights;

  // Configure accumulate biases kernel for non quantized asymmetric types
  if (biases != nullptr)
  {
    _accumulate_biases = true;

    // Configure accumulate biases kernel
    _accumulate_biases_kernel.configure(output, biases);
  }

  // With the Fully Connected layer we can have 4 different cases:
  //  1) Convolution layer -> Fully Connected layer without batches
  //  2) Fully Connected layer -> Fully Connected layer without batches
  //  3) Convolution layer -> Fully Connected layer with batches
  //  4) Fully Connected layer -> Fully Connected layer with batches

  const ITensor *weights_to_use = weights;

  // Check if we have a fully connected layer with batches
  const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
  bool is_fc_after_conv;
  if (is_batched_fc_layer)
  {
    is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) &&
                       (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                   input->info()->tensor_shape().cend(),
                                   output->info()->tensor_shape().cbegin() + 1));
  }
  else
  {
    is_fc_after_conv = input->info()->num_dimensions() > 1 && input->info()->dimension(1) > 1;
  }
  ARM_COMPUTE_ERROR_ON_MSG(is_fc_after_conv,
                           "NEFullyConnectedHybridLayer does not support a fully connected layer "
                           "after a convolution layer");
  // Silence the unused-variable warning when assertions are compiled out
  (void)is_fc_after_conv;

  // Reshape weights if needed
  if (!_are_weights_reshaped)
  {
    // Reshape the weights
    _reshape_weights_output.allocator()->init(
        weights->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(
            compute_transposed_shape(*weights->info())));
    _reshape_weights_function.configure(weights_to_use, &_reshape_weights_output);
    weights_to_use = &_reshape_weights_output;
  }

  // Quantize input
  _quantized_input.allocator()->init(
      input->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(
          DataType::QASYMM8_SIGNED));
  _scale_factor.allocator()->init(
      TensorInfo(TensorShape{output->info()->dimension(1)}, 1, DataType::F32));
  _quant_input_kernel.configure(input, &_quantized_input, &_scale_factor);

  // GEMM
  _gemmlowp_output.allocator()->init(
      output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
  configure_mm(&_quantized_input, weights_to_use, &_gemmlowp_output);

  // Multiply scale
  _multiply_scale_kernel.configure(&_gemmlowp_output, &_scale_factor, output,
                                   weights->info()->quantization_info().uniform().scale);

  _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;

  _quantized_input.allocator()->allocate();
  _scale_factor.allocator()->allocate();
  _gemmlowp_output.allocator()->allocate();
}

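// Static validation mirroring configure(): the input must be F16/F32, the weights
// QASYMM8_SIGNED, and every kernel/function used on the execution path is validated with
// the shapes and data types derived here.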
Status NEFullyConnectedHybridLayer::validate(const ITensorInfo *input, const ITensorInfo *weights,
                                             const ITensorInfo *biases, const ITensorInfo *output,
                                             FullyConnectedLayerInfo fc_info)
{
  ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
  ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8_SIGNED);
  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
  ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
  ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 2);

  bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;

  const ITensorInfo &reshaped_weights =
      TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(
          compute_transposed_shape(*weights)));

  // Validate accumulate biases kernel for non quantized asymmetric types
  if (biases != nullptr)
  {
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMMatrixAccumulateBiasesKernel::validate(output, biases));
  }

  // With the Fully Connected layer we can have 4 different cases:
  //  1) Convolution layer -> Fully Connected layer without batches
  //  2) Fully Connected layer -> Fully Connected layer without batches
  //  3) Convolution layer -> Fully Connected layer with batches
  //  4) Fully Connected layer -> Fully Connected layer with batches

  const ITensorInfo *weights_to_use = weights;

  if (!weights_reshaped)
  {
    // Validate reshape weights kernel
    ARM_COMPUTE_RETURN_ON_ERROR(
        NEFullyConnectedHybridLayerReshapeWeights::validate(weights_to_use, &reshaped_weights));
    weights_to_use = &reshaped_weights;
  }

  // Fully Connected layer after a Fully Connected layer without batches
  ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));

  // Validate quantization kernel
  const ITensorInfo &quantized_input =
      TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_data_type(
          DataType::QASYMM8_SIGNED));
  const ITensorInfo &scale_factor = TensorInfo(TensorShape{output->dimension(1)}, 1, DataType::F32);
  ARM_COMPUTE_RETURN_ON_ERROR(
      NEQuantizationSymmetricKernel::validate(input, &quantized_input, &scale_factor));

  const ITensorInfo &gemmlowp_output = TensorInfo(
      output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
  // Validate matrix multiply kernel
  ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(quantized_input, *weights_to_use, gemmlowp_output));

  ARM_COMPUTE_RETURN_ON_ERROR(NEMultiplyScaleFactorKernel::validate(
      &gemmlowp_output, &scale_factor, output, weights->quantization_info().uniform().scale));

  return Status{};
}

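// Per-run execution path: quantize the input, run the integer GEMM, scale the result back
// to float, and optionally accumulate the biases.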
void NEFullyConnectedHybridLayer::run()
{
  prepare();

  MemoryGroupResourceScope scope_mg(_memory_group);

  // Quantize input
  NEScheduler::get().schedule(&_quant_input_kernel, Window::DimY);

  // Run matrix multiply
  _mm_gemmlowp.run();

  // Multiply scale factor
  NEScheduler::get().schedule(&_multiply_scale_kernel, Window::DimY);

  // Accumulate biases if provided
  if (_accumulate_biases)
  {
    NEScheduler::get().schedule(&_accumulate_biases_kernel, Window::DimY);
  }
}

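// One-time preparation: transpose the weights if needed, run the GEMM function's own
// prepare step, and free temporaries that are no longer referenced.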
void NEFullyConnectedHybridLayer::prepare()
{
  if (!_is_prepared)
  {
    ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

    auto release_unused = [](Tensor *w) {
      if (!w->is_used())
      {
        w->allocator()->free();
      }
    };

    // Reshape of the weights (happens only once)
    if (!_are_weights_reshaped)
    {
      // Run reshape weights kernel and mark weights as unused
      _reshape_weights_output.allocator()->allocate();
      _reshape_weights_function.run();

      _are_weights_reshaped = true;
      // We cannot release _original_weights here because it may still be used by other nodes
    }

    // Prepare the GEMM and release unused weights
    _mm_gemmlowp.prepare();

    // Release reshaped weights if unused
    release_unused(&_reshape_weights_output);

    _is_prepared = true;
  }
}