/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayerEx.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

#include <algorithm>
#include <cmath>

using namespace arm_compute;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const ITensorInfo &output)
{
  if (is_data_type_quantized_asymmetric(input.data_type()))
  {
    // The matrix multiplication needs negated offsets, so build a QuantizationInfo with the
    // input and weights offsets negated
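    // (A quantized value q represents real_value = scale * (q - offset); the GEMMLowp core
    // adds the offsets it is given to the raw values, so passing the negated zero points
    // makes it effectively compute on (q - offset).)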
    const QuantizationInfo input_quantization_info(input.quantization_info().uniform().scale,
                                                   -input.quantization_info().uniform().offset);
    const QuantizationInfo weights_quantization_info(weights.quantization_info().uniform().scale,
                                                     -weights.quantization_info().uniform().offset);

    // Validate gemmlowp function
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyCore::validate(
        &input.clone()->set_quantization_info(input_quantization_info),
        &weights.clone()->set_quantization_info(weights_quantization_info), nullptr, &output));
  }
  else
  {
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMM::validate(
        &input, &weights, nullptr, &output, 1.f, 0.0f,
        GEMMInfo(false, false, false /* Reshape weights only for the first run */)));
  }

  return Status{};
}
} // namespace

NEFullyConnectedLayerEx::NEFullyConnectedLayerEx(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _flatten_kernel(), _convert_weights(),
      _reshape_weights_function(), _mm_gemm(), _mm_gemmlowp(), _gemmlowp_output_stage(),
      _accumulate_biases_kernel(), _flatten_output(), _gemmlowp_output(),
      _converted_weights_output(), _reshape_weights_output(), _original_weights(nullptr),
      _are_weights_converted(true), _are_weights_reshaped(false), _is_fc_after_conv(false),
      _accumulate_biases(false), _is_quantized(false), _is_prepared(false)
{
}

void NEFullyConnectedLayerEx::configure_mm(const ITensor *input, const ITensor *weights,
                                           ITensor *output)
{
  if (_is_quantized)
  {
    // The matrix multiplication needs negated offsets, so temporarily replace the input and
    // weights QuantizationInfo with versions whose offsets are negated (the originals are
    // saved and restored below)
    const QuantizationInfo input_quantization_info = input->info()->quantization_info();
    const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();

    input->info()->set_quantization_info(QuantizationInfo(
        input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
    weights->info()->set_quantization_info(QuantizationInfo(
        weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

    // Configure gemmlowp function
    _mm_gemmlowp.configure(input, weights, nullptr, output);

    // Restore the original QuantizationInfo, as input and weights could be used in other
    // fully connected layers
    input->info()->set_quantization_info(input_quantization_info);
    weights->info()->set_quantization_info(weights_quantization_info);
  }
  else
  {
    // Configure matrix multiply kernel
    _mm_gemm.configure(input, weights, nullptr, output, 1.f, 0.0f,
                       GEMMInfo(false, false, false /* Reshape weights only for the first run */));
  }
}

void NEFullyConnectedLayerEx::configure_conv_fc(const ITensor *input, const ITensor *weights,
                                                ITensor *output)
{
  ARM_COMPUTE_ERROR_ON(
      (weights->info()->dimension(1) !=
       (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));

  // If the fully connected layer is called after a convolution layer, the input tensor must be
  // linearized

  // Initialize output tensor for flatten
  TensorShape shape_flatten = compute_flatten_shape(input->info());
  _flatten_output.allocator()->init(
      input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(
          shape_flatten));

  // Configure flatten kernel
  _memory_group.manage(&_flatten_output);
  _flatten_kernel.configure(input, &_flatten_output);

  // Configure matrix multiply kernel
  configure_mm(&_flatten_output, weights, output);

  // Allocate the output tensor for flatten once all the configure methods have been called
  _flatten_output.allocator()->allocate();
}

void NEFullyConnectedLayerEx::configure_fc_fc(const ITensor *input, const ITensor *weights,
                                              ITensor *output)
{
  ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));

  // Configure matrix multiply kernel
  configure_mm(input, weights, output);
}

void NEFullyConnectedLayerEx::configure(const ITensor *input, const ITensor *weights,
                                        const ITensor *biases, ITensor *output,
                                        FullyConnectedLayerInfo fc_info)
{
  ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

  // Perform validate step
  ARM_COMPUTE_ERROR_THROW_ON(NEFullyConnectedLayerEx::validate(
      input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(),
      fc_info));

  _are_weights_converted = true;
  _are_weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
  _is_fc_after_conv = true;
  _accumulate_biases = false;
  _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
  _original_weights = weights;

  // Configure gemmlowp output
  if (_is_quantized)
  {
    _gemmlowp_output.allocator()->init(
        output->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(
            DataType::S32));
  }

  // Configure accumulate biases kernel for non quantized asymmetric types
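  // (For float types the bias is added by a separate accumulate-biases kernel run after the
  // GEMM; in the quantized path the bias is instead folded into the GEMMLowp output stage
  // configured below.)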
  if (biases != nullptr && !_is_quantized)
  {
    _accumulate_biases = true;

    // Configure accumulate biases kernel
    _accumulate_biases_kernel.configure(output, biases);
  }

  // With the Fully Connected layer we can have 4 different cases:
  //  1) Convolution layer -> Fully Connected layer without batches
  //  2) Fully Connected layer -> Fully Connected layer without batches
  //  3) Convolution layer -> Fully Connected layer with batches
  //  4) Fully Connected layer -> Fully Connected layer with batches

  const ITensor *weights_to_use = weights;

  // Check if we have a fully connected layer with batches
  const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
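  // (When the output is batched, the input is treated as coming from a convolution if its
  // dimensions from index 3 onwards match the output's dimensions from index 1 onwards;
  // without batches, any input with more than one dimension is treated as a convolution
  // output.)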
  if (is_batched_fc_layer)
  {
    _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) &&
                        (std::equal(input->info()->tensor_shape().cbegin() + 3,
                                    input->info()->tensor_shape().cend(),
                                    output->info()->tensor_shape().cbegin() + 1));
  }
  else
  {
    _is_fc_after_conv = input->info()->num_dimensions() > 1;
  }

  // Reshape weights if needed
  if (!_are_weights_reshaped)
  {
    // Reshape the weights
    _reshape_weights_function.configure(weights, &_reshape_weights_output);
    weights_to_use = &_reshape_weights_output;
  }

  // Convert weights if needed
  if (_is_fc_after_conv && (input->info()->data_layout() != fc_info.weights_trained_layout))
  {
    // Convert weights
    _convert_weights.configure(weights_to_use, &_converted_weights_output,
                               input->info()->tensor_shape(), fc_info.weights_trained_layout);

    weights_to_use = &_converted_weights_output;
    _are_weights_converted = false;
  }

  ITensor *tmp_output = (_is_quantized) ? &_gemmlowp_output : output;
  if (_is_fc_after_conv)
  {
    // Fully Connected layer after a Convolution Layer without batches
    configure_conv_fc(input, weights_to_use, tmp_output);
  }
  else
  {
    // Fully Connected layer after a Fully Connected Layer without batches
    configure_fc_fc(input, weights_to_use, tmp_output);
  }

  // Configure output stage for asymmetric quantized types
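  // (The effective real multiplier is input_scale * weights_scale / output_scale; it is
  // decomposed into an integer fixed-point multiplier and a right shift so that the
  // requantization can be done in integer arithmetic.)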
  if (_is_quantized)
  {
    float multiplier = input->info()->quantization_info().uniform().scale *
                       weights->info()->quantization_info().uniform().scale /
                       output->info()->quantization_info().uniform().scale;
    int output_multiplier;
    int output_shift;
    quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier,
                                                               &output_shift);
    _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier,
                                     output_shift,
                                     output->info()->quantization_info().uniform().offset);
    _gemmlowp_output.allocator()->allocate();
  }

  _are_weights_reshaped = _are_weights_reshaped || fc_info.retain_internal_weights;
}

Status NEFullyConnectedLayerEx::validate(const ITensorInfo *input, const ITensorInfo *weights,
                                         const ITensorInfo *biases, const ITensorInfo *output,
                                         FullyConnectedLayerInfo fc_info)
{
  ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
  ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16,
                                                       DataType::F32);
  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
  ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);

  bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
  bool is_fc_after_conv = true;
  bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

  const ITensorInfo &flatten_input =
      TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(
          compute_flatten_shape(input)));
  const ITensorInfo &reshaped_weights =
      TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(
          compute_transposed_shape(*weights)));
  const ITensorInfo &converted_weights =
      weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding())
                       : TensorInfo(*reshaped_weights.clone());
  const ITensorInfo &gemmlowp_output = TensorInfo(
      output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));

  // Validate accumulate biases kernel for non quantized asymmetric types
  if (biases != nullptr && !is_quantized)
  {
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMMatrixAccumulateBiasesKernel::validate(output, biases));
  }

  // With the Fully Connected layer we can have 4 different cases:
  //  1) Convolution layer -> Fully Connected layer without batches
  //  2) Fully Connected layer -> Fully Connected layer without batches
  //  3) Convolution layer -> Fully Connected layer with batches
  //  4) Fully Connected layer -> Fully Connected layer with batches

  const ITensorInfo *input_to_use = input;
  const ITensorInfo *weights_to_use = weights;
  const ITensorInfo *tmp_output = (is_quantized) ? &gemmlowp_output : output;

  // Check if we have a fully connected layer with batches
  const bool is_batched_fc_layer = output->dimension(1) > 1;

  if (is_batched_fc_layer)
  {
    is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) &&
                       (std::equal(input->tensor_shape().cbegin() + 3, input->tensor_shape().cend(),
                                   output->tensor_shape().cbegin() + 1));
  }
  else
  {
    is_fc_after_conv = input->num_dimensions() > 1;
  }

  if (!weights_reshaped)
  {
    // Validate reshape weights kernel
    ARM_COMPUTE_RETURN_ON_ERROR(
        NEFullyConnectedLayerReshapeWeights::validate(weights, &reshaped_weights));
    weights_to_use = &reshaped_weights;
  }

  if (is_fc_after_conv && (input->data_layout() != fc_info.weights_trained_layout))
  {
    // Validate convert weights kernel
    ARM_COMPUTE_RETURN_ON_ERROR(NEConvertFullyConnectedWeights::validate(
        weights_to_use, &converted_weights, input->tensor_shape(), fc_info.weights_trained_layout));
    weights_to_use = &converted_weights;
  }

  if (is_fc_after_conv)
  {
    // Fully Connected layer after a Convolution Layer without batches
    ARM_COMPUTE_RETURN_ERROR_ON(
        (weights_to_use->dimension(1) !=
         (input->dimension(0) * input->dimension(1) * input->dimension(2))));

    // Validate flatten kernel
    ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayerKernel::validate(input, &flatten_input));
    input_to_use = &flatten_input;
  }
  else
  {
    // Fully Connected layer after a Fully Connected Layer without batches
    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_to_use->dimension(1));
  }
  // Validate matrix multiply kernel
  ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*input_to_use, *weights_to_use, *tmp_output));

  // Validate output stage for asymmetric quantized types
  if (is_quantized)
  {
    ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(
        &gemmlowp_output, biases, output));
  }

  return Status{};
}

void NEFullyConnectedLayerEx::run()
{
  if (!_is_prepared)
  {
    if (!_are_weights_reshaped)
      _reshape_weights_output.allocator()->allocate();
    if (!_are_weights_converted)
      _converted_weights_output.allocator()->allocate();
    _is_prepared = true;
  }

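  // Unlike NEFullyConnectedLayer, which processes the weights once in prepare(), this Ex
  // variant re-runs the weight reshape/convert steps on every invocation so that it stays
  // correct when the weights can change between runs.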
  {
    ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

    // Reshape of the weights
    if (!_are_weights_reshaped)
    {
      _reshape_weights_function.run();
    }

    // Convert weights if needed
    if (!_are_weights_converted)
    {
      _convert_weights.run();
    }

    // Prepare the GEMM
    if (!_is_quantized)
    {
      _mm_gemm.prepare();
    }
  }

  MemoryGroupResourceScope scope_mg(_memory_group);

  // Linearize input if it comes from a convolutional layer
  if (_is_fc_after_conv)
  {
    NEScheduler::get().schedule(&_flatten_kernel, Window::DimY);
  }

  // Run matrix multiply
  if (_is_quantized)
  {
    _mm_gemmlowp.run();
  }
  else
  {
    _mm_gemm.run();
  }

  // Accumulate biases if provided
  if (_is_quantized)
  {
    _gemmlowp_output_stage.run();
  }
  else
  {
    if (_accumulate_biases)
    {
      NEScheduler::get().schedule(&_accumulate_biases_kernel, Window::DimY);
    }
  }
}

void NEFullyConnectedLayerEx::prepare()
{
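  // NOTE The one-shot weight preparation below is disabled; run() reshapes/converts the
  // weights on every call instead.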
#if 0 // TODO Remove this block
  if (!_is_prepared)
  {
    ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

    auto release_unused = [](Tensor *w) {
      if (!w->is_used())
      {
        w->allocator()->free();
      }
    };

    // Pointer to current weights
    const ITensor *cur_weights = _original_weights;

    // Reshape of the weights (happens only once)
    if (!_are_weights_reshaped)
    {
      // Run reshape weights kernel and mark weights as unused
      _reshape_weights_output.allocator()->allocate();
      _reshape_weights_function.run();

      cur_weights->mark_as_unused();
      cur_weights = &_reshape_weights_output;
      _are_weights_reshaped = true;
    }

    // Convert weights if needed (happens only once)
    if (!_are_weights_converted)
    {
      _converted_weights_output.allocator()->allocate();
      _convert_weights.run();

      cur_weights->mark_as_unused();
      _are_weights_converted = true;
    }

    // Release reshaped weights if unused
    release_unused(&_reshape_weights_output);

    // Prepare the GEMM and release unused weights
    if (!_is_quantized)
    {
      _mm_gemm.prepare();
    }

    // Release converted weights if unused
    release_unused(&_reshape_weights_output);
    release_unused(&_converted_weights_output);

    _is_prepared = true;
  }
#endif
}
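
// Example usage (a minimal sketch, not part of this translation unit): the tensor names,
// shapes and the default-constructed FullyConnectedLayerInfo below are illustrative
// assumptions, showing the float FC-after-FC path with a separate bias accumulation.
//
//   Tensor input, weights, biases, output;
//   input.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
//   weights.allocator()->init(TensorInfo(TensorShape(128U, 16U), 1, DataType::F32));
//   biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
//   output.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
//
//   NEFullyConnectedLayerEx fc;
//   fc.configure(&input, &weights, &biases, &output, FullyConnectedLayerInfo{});
//
//   input.allocator()->allocate();
//   weights.allocator()->allocate();
//   biases.allocator()->allocate();
//   output.allocator()->allocate();
//   // ... fill input, weights and biases, then:
//   fc.run();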