// arm_compute v18.05
// src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h"
#include "arm_compute/core/NEON/kernels/detail/NEDirectConvolutionDetail.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/AccessWindowTranspose.h"
#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "support/ToolchainSupport.h"

using namespace arm_compute;
using namespace arm_compute::detail;
using namespace arm_compute::misc::shape_calculator;
using namespace depthwise;

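// The anonymous namespace below implements the generic (non-optimized) execution path:
// a 3x3 depthwise convolver templated on the input element type T1, the output/accumulator
// type T2 and the convolution stride along x.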
namespace
{
template <typename T1, typename T2, unsigned int stridex>
class convolver_3x3
{
public:
    static void convolve(const Window &window, unsigned int num_elems_written_per_iteration,
                         const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
    {
        const int input_offset   = -input->info()->quantization_info().offset;
        const int weights_offset = -weights->info()->quantization_info().offset;

        const int          input_stride_x  = input->info()->strides_in_bytes().x();
        const int          input_stride_y  = input->info()->strides_in_bytes().y();
        const int          input_stride_z  = input->info()->strides_in_bytes().z();
        const int          output_stride_y = output->info()->strides_in_bytes().y();
        const int          kernel_stride_y = weights->info()->strides_in_bytes().y();
        const int          kernel_stride_z = weights->info()->strides_in_bytes().z();
        const int          output_w        = output->info()->dimension(0);
        const int          output_h        = output->info()->dimension(1);
        const int          delta_input     = get_input_num_elems_processed<stridex>(num_elems_written_per_iteration);
        const unsigned int conv_stride_y   = std::get<1>(conv_info.stride());
        const unsigned int conv_pad_x      = conv_info.pad_left();
        const unsigned int conv_pad_y      = conv_info.pad_top();

        // Set up the output window for the iterator
        Window window_out = window;
        window_out.set(Window::DimX, Window::Dimension(0, output->info()->dimension(Window::DimX), output->info()->dimension(Window::DimX)));
        window_out.set(Window::DimY, Window::Dimension(0, output->info()->dimension(Window::DimY), output->info()->dimension(Window::DimY)));

        // Set up the input window for the iterator
        Window window_in = window;
        // We only want execute_window_loop to iterate over the dimensions > 2, so we set the first 2 dimensions to 0
        window_in.set(Window::DimX, Window::Dimension(0, 0, 0));
        window_in.set(Window::DimY, Window::Dimension(0, 0, 0));

        Window window_k = calculate_max_window(*weights->info(), Steps(1u));

        Iterator in(input, window_in);
        Iterator out(output, window_out);
        Iterator w(weights, window_k);

        const uint8_t *weights_ptr = w.ptr();

        execute_window_loop(window_out, [&](const Coordinates & id)
        {
            int ih = 0;
            int oh = 0;

            // Move the input pointer back by the left/top padding and remap the output channel id.z()
            // to its source input channel (id.z() / depth_multiplier)
            const uint8_t *input_ptr        = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y - (id.z() - id.z() / depth_multiplier) * input_stride_z;
            const uint8_t *ptr_weights_base = weights_ptr + id.z() * kernel_stride_z;

            // Load the three rows of the 3x3 filter once per plane
            const auto ptr_weights_r0 = reinterpret_cast<const T1 *>(ptr_weights_base);
            const auto ptr_weights_r1 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y);
            const auto ptr_weights_r2 = reinterpret_cast<const T1 *>(ptr_weights_base + kernel_stride_y * 2);
            const auto vw_r0          = load_matrix_row(ptr_weights_r0, weights_offset);
            const auto vw_r1          = load_matrix_row(ptr_weights_r1, weights_offset);
            const auto vw_r2          = load_matrix_row(ptr_weights_r2, weights_offset);

            for(ih = 0, oh = 0; oh < output_h; ++oh, ih += conv_stride_y)
            {
                auto in_top = reinterpret_cast<const T1 *>(input_ptr + (ih + 0) * input_stride_y);
                auto in_mid = reinterpret_cast<const T1 *>(input_ptr + (ih + 1) * input_stride_y);
                auto in_low = reinterpret_cast<const T1 *>(input_ptr + (ih + 2) * input_stride_y);
                auto p_out  = reinterpret_cast<T2 *>(out.ptr() + oh * output_stride_y);

                for(int ow = 0; ow < output_w; ow += num_elems_written_per_iteration,
                    in_top += delta_input, in_mid += delta_input, in_low += delta_input,
                    p_out += num_elems_written_per_iteration)
                {
                    auto vres = convolve_3x3<stridex>(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, 0, input_offset);
                    store_results<stridex>(p_out, vres);
                }
            }
        },
        in, out);
    }
};

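// Dispatch on the runtime stride so that the stride becomes a compile-time constant
// inside convolver_3x3.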
template <typename T1, typename T2>
inline void convolve_3x3(const Window &window, unsigned int num_elems_written_per_iteration,
                         const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
{
    const unsigned int conv_stride_x = std::get<0>(conv_info.stride());
    switch(conv_stride_x)
    {
        case 1:
            convolver_3x3<T1, T2, 1>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier);
            break;
        case 2:
            convolver_3x3<T1, T2, 2>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier);
            break;
        case 3:
            convolver_3x3<T1, T2, 3>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier);
            break;
        default:
            ARM_COMPUTE_ERROR("Not implemented");
    }
}
} // namespace

NEDepthwiseConvolutionLayer3x3Kernel::NEDepthwiseConvolutionLayer3x3Kernel()
    : _border_size(0), _input(), _output(), _weights(), _conv_info(), _convolver(nullptr), _num_elems_written_per_iteration(0), _run_optimized(false), _depth_multiplier(1)
{
}

BorderSize NEDepthwiseConvolutionLayer3x3Kernel::border_size() const
{
    return _border_size;
}

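// Configures the kernel for either the optimized (assembly-backed) or the generic NEON path,
// depending on whether is_optimized_execution_possible() accepts the given configuration.
//
// A minimal usage sketch (illustrative only, not taken from this file; in practice the kernel is
// driven by the NEDepthwiseConvolutionLayer3x3 function, which also handles border filling and
// scheduling):
//
//   NEDepthwiseConvolutionLayer3x3Kernel kernel;
//   kernel.configure(&input, &weights, &output, conv_info, /* depth_multiplier */ 1);
//   NEScheduler::get().schedule(&kernel, Window::DimX);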
void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
                                                     DataLayout data_layout)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);

    _input            = input;
    _output           = output;
    _weights          = weights;
    _conv_info        = conv_info;
    _depth_multiplier = depth_multiplier;
    _convolver        = nullptr;

    _run_optimized = NEDepthwiseConvolutionLayer3x3Kernel::is_optimized_execution_possible(input->info()->tensor_shape(),
                                                                                           conv_info,
                                                                                           input->info()->data_type(), depth_multiplier,
                                                                                           data_layout);

    (_run_optimized) ? configure_optimized() : configure_generic();
}

void NEDepthwiseConvolutionLayer3x3Kernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_UNUSED(info);

    (_run_optimized) ? run_optimized(window, info) : run_generic(window, info);
}

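// The optimized path is used only when all of the following hold:
//  - the data type is F32,
//  - the x and y strides are equal and either 1 or 2,
//  - the padding is either "same" (as computed by calculate_same_pad) or zero ("valid"),
//  - the depth multiplier is 1.
// For NHWC inputs the shape is permuted to the NCHW dimension order before the padding check.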
bool NEDepthwiseConvolutionLayer3x3Kernel::is_optimized_execution_possible(TensorShape input_shape, PadStrideInfo conv_info, DataType dt, unsigned int depth_multiplier, DataLayout data_layout)
{
    // Reshape input shape if in NHWC format
    TensorShape in_shape{ input_shape };
    if(data_layout == DataLayout::NHWC)
    {
        in_shape.set(Window::DimX, input_shape.y());
        in_shape.set(Window::DimY, input_shape.z());
        in_shape.set(Window::DimZ, input_shape.x());
    }

    // Check supported data type
    bool supported_datatype = (dt == DataType::F32);

    // Check for supported strides
    const auto &strides           = conv_info.stride();
    bool        supported_strides = (strides.first == strides.second) && ((strides.first == 1) || (strides.first == 2));

    // Check for supported padding
    const auto    pad_top           = conv_info.pad_top();
    const auto    pad_right         = conv_info.pad_right();
    const auto    pad_bottom        = conv_info.pad_bottom();
    const auto    pad_left          = conv_info.pad_left();
    PadStrideInfo same_pad          = calculate_same_pad(in_shape, TensorShape(3U, 3U), conv_info);
    bool          is_same_padding   = (pad_top == same_pad.pad_top()) && (pad_right == same_pad.pad_right()) && (pad_bottom == same_pad.pad_bottom()) && (pad_left == same_pad.pad_left());
    bool          is_valid_padding  = (pad_top == 0) && (pad_right == 0) && (pad_bottom == 0) && (pad_left == 0);
    bool          supported_padding = is_same_padding || is_valid_padding;

    return supported_datatype && supported_strides && supported_padding && (depth_multiplier == 1);
}

void NEDepthwiseConvolutionLayer3x3Kernel::generate_convolver()
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(_input, 1, DataType::F32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(_input, _weights);
    ARM_COMPUTE_ERROR_ON(_weights->info()->dimension(1) != 3 || _weights->info()->dimension(2) != 3);

    _convolver = create_convolver_object(_conv_info, _weights, _input, _output, true);
}

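// Generic path: computes the output shape, initializes the output tensor if needed and sets up
// a 2D execution window. QASYMM8 inputs accumulate into S32, F32 stays in F32; the number of
// elements written per iteration (16 >> stride_x) determines the window step along x.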
void NEDepthwiseConvolutionLayer3x3Kernel::configure_generic()
{
    ARM_COMPUTE_ERROR_ON(_weights->info()->dimension(0) != 3 || _weights->info()->dimension(1) != 3);

    // Get convolved dimensions
    const TensorShape output_shape = compute_depthwise_convolution_shape(*_input->info(), *_weights->info(), _conv_info, _depth_multiplier);
    const DataType    output_dt    = (_input->info()->data_type() == DataType::QASYMM8) ? DataType::S32 : _input->info()->data_type();

    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*_output->info(),
                       _input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape).set_data_type(output_dt));

    ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(_output->info()->tensor_shape(), output_shape);

    const unsigned int conv_stride_x   = _conv_info.stride().first;
    const unsigned int conv_stride_y   = _conv_info.stride().second;
    const unsigned int conv_pad_top    = _conv_info.pad_top();
    const unsigned int conv_pad_right  = _conv_info.pad_right();
    const unsigned int conv_pad_bottom = _conv_info.pad_bottom();
    const unsigned int conv_pad_left   = _conv_info.pad_left();

    ARM_COMPUTE_ERROR_ON(conv_stride_x < 1 || conv_stride_x > 3);

    unsigned int num_elems_read_per_iteration = 0;
    switch(_input->info()->data_type())
    {
        case DataType::QASYMM8:
            num_elems_read_per_iteration     = 16;
            _num_elems_written_per_iteration = 16 >> conv_stride_x;
            break;
        case DataType::F32:
            num_elems_read_per_iteration     = 12;
            _num_elems_written_per_iteration = 16 >> conv_stride_x;
            break;
        default:
            ARM_COMPUTE_ERROR("Data type not supported.");
    }
    _border_size = BorderSize(conv_pad_top, conv_pad_right, conv_pad_bottom, conv_pad_left);

    // Configure kernel window
    Window win = calculate_max_window(*_output->info(), Steps(_num_elems_written_per_iteration));

    AccessWindowRectangle input_access(_input->info(), -conv_pad_left, -conv_pad_top,
                                       num_elems_read_per_iteration, 3,
                                       conv_stride_x, conv_stride_y);
    AccessWindowStatic     weights_access(_weights->info(), 0, 0, 3, 3);
    AccessWindowHorizontal output_access(_output->info(), 0, _num_elems_written_per_iteration);

    update_window_and_padding(win, input_access, weights_access, output_access);
    output_access.set_valid_region(win, ValidRegion(Coordinates(), _output->info()->tensor_shape()));

    INEKernel::configure(win);
}

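// Optimized path: delegates the computation to a depthwise::DepthwiseConvolution object and
// exposes a 1D window over its internal work units. No border is required; when the channel
// count is at least 128 and a multiple of 16, four elements of right padding are added to the
// input, weights and output (presumably to help the convolver's vectorized accesses).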
void NEDepthwiseConvolutionLayer3x3Kernel::configure_optimized()
{
    ARM_COMPUTE_ERROR_ON(_weights->info()->dimension(1) != 3 || _weights->info()->dimension(2) != 3);

    _border_size = BorderSize(0, 0);
    _convolver   = create_convolver_object(_conv_info, _weights, _input, _output);

    // Auto-configure output
    bool        same_padding = _conv_info.has_padding();
    TensorShape output_shape{ _input->info()->tensor_shape() };

    output_shape.set(1, _convolver->output_size(output_shape.y(), same_padding)); // Set width
    output_shape.set(2, _convolver->output_size(output_shape.z(), same_padding)); // Set height

    // Output auto-initialization if not yet initialized
    auto_init_if_empty(*_output->info(),
                       _input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape));

    // Set padding in channels
    const int num_channels = _weights->info()->dimension(0);
    if((num_channels >= 128) && (num_channels % 16 == 0))
    {
        _input->info()->extend_padding(PaddingSize(0, 4, 0, 0));
        _weights->info()->extend_padding(PaddingSize(0, 4, 0, 0));
        _output->info()->extend_padding(PaddingSize(0, 4, 0, 0));
    }

    // Configure window
    Window win;
    auto   win_last = _convolver->get_window();
    win.set(Window::DimX, Window::Dimension(0, win_last, 1));
    INEKernel::configure(win);
}

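// Generic path execution: dispatch on the input data type to the templated NEON convolver.
// F32 accumulates in float, QASYMM8 accumulates into int32_t.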
void NEDepthwiseConvolutionLayer3x3Kernel::run_generic(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);

    switch(_input->info()->data_type())
    {
        case DataType::F32:
            convolve_3x3<float, float>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier);
            break;
        case DataType::QASYMM8:
            convolve_3x3<uint8_t, int32_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier);
            break;
        default:
            ARM_COMPUTE_ERROR("Not implemented");
    }
}

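// Optimized path execution: the window along x encodes the convolver's work-unit range,
// so each thread runs its [start, end) slice of work units.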
void NEDepthwiseConvolutionLayer3x3Kernel::run_optimized(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON(!_convolver);

    const size_t start = window.x().start();
    const size_t end   = window.x().end();
    _convolver->run(start, end);
}

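// Builds the depthwise convolver for the optimized path. Stride 1 builds
// DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float> and stride 2 builds
// DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float> (the leading template parameters appear
// to be the output tile size); any other stride is rejected by returning nullptr. When
// setup_strides is true, tensor strides are passed explicitly in elements; otherwise zeros are
// passed, which presumably lets the convolver derive default contiguous strides itself.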
std::unique_ptr<depthwise::IDepthwiseConvolution> NEDepthwiseConvolutionLayer3x3Kernel::create_convolver_object(PadStrideInfo  conv_info,
                                                                                                                const ITensor *w,
                                                                                                                const ITensor *in,
                                                                                                                ITensor       *out,
                                                                                                                bool           setup_strides)
{
    const TensorShape shape               = in->info()->tensor_shape();
    const int         in_rows             = shape.z();
    const int         in_cols             = shape.y();
    const int         n_batches           = shape[3];
    const int         n_channels          = shape.x();
    const bool        padding_same        = conv_info.has_padding();
    const int         weight_col_stride   = (setup_strides) ? w->info()->strides_in_bytes().y() / w->info()->element_size() : 0;
    const int         weight_row_stride   = (setup_strides) ? w->info()->strides_in_bytes().z() / w->info()->element_size() : 0;
    const int         input_col_stride    = (setup_strides) ? in->info()->strides_in_bytes().y() / in->info()->element_size() : 0;
    const int         input_row_stride    = (setup_strides) ? in->info()->strides_in_bytes().z() / in->info()->element_size() : 0;
    const int         input_batch_stride  = (setup_strides) ? in->info()->strides_in_bytes()[3] / in->info()->element_size() : 0;
    const int         output_col_stride   = (setup_strides) ? out->info()->strides_in_bytes().y() / out->info()->element_size() : 0;
    const int         output_row_stride   = (setup_strides) ? out->info()->strides_in_bytes().z() / out->info()->element_size() : 0;
    const int         output_batch_stride = (setup_strides) ? out->info()->strides_in_bytes()[3] / out->info()->element_size() : 0;

    const auto stride_x = conv_info.stride().first;
    switch(stride_x)
    {
        case 1:
            return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<4, 4, 3, 3, 1, 1, float, float>>(
                       n_batches,
                       in_rows,
                       in_cols,
                       n_channels,
                       padding_same,
                       reinterpret_cast<const float *>(w->ptr_to_element(Coordinates())),
                       reinterpret_cast<float *>(in->ptr_to_element(Coordinates())),
                       reinterpret_cast<float *>(out->ptr_to_element(Coordinates())),
                       weight_col_stride, weight_row_stride,
                       input_col_stride, input_row_stride, input_batch_stride,
                       output_col_stride, output_row_stride, output_batch_stride);
        case 2:
            return arm_compute::support::cpp14::make_unique<DepthwiseConvolution<3, 3, 3, 3, 2, 2, float, float>>(
                       n_batches,
                       in_rows,
                       in_cols,
                       n_channels,
                       padding_same,
                       reinterpret_cast<const float *>(w->ptr_to_element(Coordinates())),
                       reinterpret_cast<float *>(in->ptr_to_element(Coordinates())),
                       reinterpret_cast<float *>(out->ptr_to_element(Coordinates())),
                       weight_col_stride, weight_row_stride,
                       input_col_stride, input_row_stride, input_batch_stride,
                       output_col_stride, output_row_stride, output_batch_stride);
        default:
            return nullptr;
    }
}