compute/ARMComputeEx/src/runtime/NEON/functions/NETransposeConvLayer.cpp
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NETransposeConvLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/UtilsEx.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculatorEx.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"

using namespace arm_compute::misc::shape_calculator;

namespace arm_compute
{

NETransposeConvLayer::NETransposeConvLayer(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)),
      _conv_f(),
      _upsample_f(),
      _flip_weights(),
      _scaled_output(),
      _weights_flipped(),
      _flip_axis(),
      _original_weights(nullptr),
      _input(nullptr),
      _info(),
      _is_prepared(false)
{
}

Status NETransposeConvLayer::validate(const ITensorInfo *input, const ITensorInfo *weights,
                                      const ITensorInfo *bias, const ITensorInfo *output,
                                      const PadStrideInfo &info, unsigned int invalid_right,
                                      unsigned int invalid_bottom)
{
  ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16,
                                                       DataType::QASYMM8, DataType::QASYMM8_SIGNED);
  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, input);
  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(weights, input);
  const unsigned int width_idx =
      get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::WIDTH);
  const unsigned int height_idx =
      get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::HEIGHT);
  ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(width_idx) != weights->dimension(height_idx));
  ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(width_idx) < 1);

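  // Compute the output dimensions of the transposed convolution from the input/weights sizes,
  // the stride/padding info and the invalid border on the right and bottom.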
  auto out_dims = transposeconv_output_dimensions(
      input->dimension(width_idx), input->dimension(height_idx), weights->dimension(width_idx),
      weights->dimension(height_idx), info, invalid_right, invalid_bottom);

  ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
  if (bias != nullptr)
  {
    if (is_data_type_quantized_asymmetric(input->data_type()))
    {
      ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
    }
    else
    {
      ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias);
    }
  }

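  // If the output tensor is already initialized, its dimensions must match the computed
  // transposed-convolution output shape.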
  if (output->tensor_shape().total_size() > 0)
  {
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);

    const TensorShape output_shape = compute_transposeconv_output_shape(out_dims, *input, *weights);

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimX) != output_shape.x(),
                                    "Output's width is invalid.");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimY) != output_shape.y(),
                                    "Output's height is invalid.");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimZ) != output_shape.z(),
                                    "Output's depth is invalid.");
  }

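  // Compute the shape of the upsampled intermediate tensor and the padding applied around it.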
  unsigned int pad_left = 0;
  unsigned int pad_right = 0;
  unsigned int pad_top = 0;
  unsigned int pad_bottom = 0;
  const TensorShape scale_out_shape = compute_transposeconv_upsampled_shape(
      *input, *weights, info, out_dims, invalid_right, invalid_bottom, pad_left, pad_right, pad_top,
      pad_bottom);
  TensorInfo scale_out_info(
      input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(scale_out_shape));
  const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);

  const unsigned int batches_idx =
      get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
  const unsigned int channel_idx =
      get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::CHANNEL);
  ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(batches_idx) !=
                              scale_out_info.dimension(batches_idx));
  ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(channel_idx) !=
                              scale_out_info.dimension(channel_idx));

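  // The transposed convolution is realized as an upsample followed by a unit-stride
  // convolution, so validate that inner convolution on the upsampled tensor.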
  ARM_COMPUTE_RETURN_ON_ERROR(NEConvolutionLayer::validate(&scale_out_info, weights, bias, output,
                                                           conv_info, WeightsInfo()));

  return Status{};
}

void NETransposeConvLayer::configure(ITensor *input, const ITensor *weights, const ITensor *bias,
                                     ITensor *output, const PadStrideInfo &info,
                                     unsigned int invalid_right, unsigned int invalid_bottom)
{
  // Perform validation step
  ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
  ARM_COMPUTE_ERROR_THROW_ON(NETransposeConvLayer::validate(
      input->info(), weights->info(), (bias == nullptr) ? nullptr : bias->info(), output->info(),
      info, invalid_right, invalid_bottom));

  const DataLayout data_layout = input->info()->data_layout();
  const unsigned int width_idx =
      get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
  const unsigned int height_idx =
      get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
  auto out_dims = transposeconv_output_dimensions(
      input->info()->dimension(width_idx), input->info()->dimension(height_idx),
      weights->info()->dimension(width_idx), weights->info()->dimension(height_idx), info,
      invalid_right, invalid_bottom);

  const TensorShape output_shape =
      compute_transposeconv_output_shape(out_dims, *input->info(), *weights->info());

  _input = input;
  _original_weights = weights;
  _info = info;
  _is_prepared = false;

  unsigned int pad_left = 0;
  unsigned int pad_right = 0;
  unsigned int pad_top = 0;
  unsigned int pad_bottom = 0;
  const unsigned int stride_x = info.stride().first;
  const unsigned int stride_y = info.stride().second;

  // Output auto initialization if not yet initialized
  auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(),
                     input->info()->quantization_info());

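  // Set up the flip-axis tensor and the weight-flipping function: the kernel is reversed
  // along width and height before it is used by the forward convolution.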
  _flip_axis.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32));
  _memory_group.manage(&_scaled_output);

  _weights_flipped.allocator()->init(weights->info()->clone()->set_data_layout(data_layout));
  _flip_weights.configure(weights, &_weights_flipped, &_flip_axis);

  // Set up the function to convolve the upscaled output
  const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);

  const TensorShape scale_out_shape = compute_transposeconv_upsampled_shape(
      *input->info(), *weights->info(), info, out_dims, invalid_right, invalid_bottom, pad_left,
      pad_right, pad_top, pad_bottom);

  const PadStrideInfo upsample_info(stride_x, stride_y, pad_left, pad_right, pad_top, pad_bottom,
                                    DimensionRoundingType::FLOOR);

  TensorInfo scale_out_info(scale_out_shape, 1, input->info()->data_type(),
                            input->info()->quantization_info());
  scale_out_info.set_data_layout(data_layout);
  _scaled_output.allocator()->init(scale_out_info);

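  // Upsample the input by the stride (with the padding computed above), then convolve the
  // scaled output with the flipped weights.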
  _upsample_f.configure(input, &_scaled_output, upsample_info);

  _conv_f.configure(&_scaled_output, &_weights_flipped, bias, output, conv_info);

  // Set up flip axis data
  _flip_axis.allocator()->allocate();
  auto axis_data = reinterpret_cast<uint32_t *>(_flip_axis.buffer());
  axis_data[0] = static_cast<uint32_t>(width_idx);
  axis_data[1] = static_cast<uint32_t>(height_idx);

  _scaled_output.allocator()->allocate();
}

void NETransposeConvLayer::run()
{
  prepare();

  MemoryGroupResourceScope scope_mg(_memory_group);

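  // Upsample the input, then run the convolution on the scaled output.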
  _upsample_f.run();
  _conv_f.run();
}

void NETransposeConvLayer::prepare()
{
  if (!_is_prepared)
  {
    ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

    // Run weights flipping and mark original weights tensor as unused
    _weights_flipped.allocator()->allocate();
    _flip_weights.run();
    _original_weights->mark_as_unused();

    // Prepare convolution
    _conv_f.prepare();

    _is_prepared = true;
  }
}
} // namespace arm_compute