/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2019-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
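// CLArgMinMaxLayerKernelEx computes, on the GPU, the index of the minimum or
// maximum value along one axis of a tensor. The optional prev_output tensor
// lets a large reduction be split into multiple passes, each pass refining the
// indices produced by the one before.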
#include "arm_compute/core/CL/kernels/CLArgMinMaxLayerKernelEx.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibraryEx.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include "support/StringSupport.h"

namespace arm_compute
{
namespace
{
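// Number of elements processed per work-item along X; also the step used when
// building the execution and access windows below.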
constexpr unsigned int vector_size = 16;

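// Checks that input/output types and the requested reduction are supported:
// only ARG_IDX_MAX and ARG_IDX_MIN, axis in [0, 3], and U32/S32/S64 indices.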
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *prev_output,
                          const ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
  ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
  ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8,
                                                       DataType::QASYMM8_SIGNED, DataType::S32,
                                                       DataType::F16, DataType::F32);
  ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX &&
                                    op != ReductionOperation::ARG_IDX_MIN,
                                  "Only ARG_IDX_MAX and ARG_IDX_MIN are supported");
  ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions,
                                  "Reduction axis greater than max number of dimensions");
  ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");

  if (output->total_size() != 0)
  {
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32,
                                                         DataType::S64);
  }
  if (prev_output != nullptr && prev_output->total_size() != 0)
  {
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(prev_output, 1, DataType::U32,
                                                         DataType::S32, DataType::S64);
    if (output->total_size() != 0)
    {
      ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(prev_output, output);
    }
  }

  return Status{};
}

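// Auto-initializes the output info (input shape with the reduced axis set to 1)
// and computes the execution window, requesting the per-axis padding needed.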
std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input,
                                                         ITensorInfo *prev_output,
                                                         ITensorInfo *output, unsigned int axis,
                                                         ReductionOperation op)
{
  ARM_COMPUTE_UNUSED(op);
  // Output tensor auto initialization if not yet initialized
  TensorShape output_shape{input->tensor_shape()};
  output_shape.set(axis, 1);
  DataType output_data_type = (prev_output != nullptr) ? (prev_output->data_type()) : DataType::S32;
  auto_init_if_empty(*output, input->clone()
                                ->set_tensor_shape(output_shape)
                                .set_data_type(output_data_type)
                                .reset_padding()
                                .set_is_resizable(true));

  Window win =
    calculate_max_window((prev_output != nullptr) ? (*prev_output) : (*input), Steps(vector_size));
  bool window_changed = false;

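  // Axis 0 reduces along X, so one work-group must be able to read the whole
  // row; the remaining axes read and write vector_size elements per step.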
  switch (axis)
  {
    case 0:
    {
      ITensorInfo *input_tensor_access = prev_output != nullptr ? prev_output : input;
      AccessWindowStatic input_access(input_tensor_access, 0, 0,
                                      static_cast<int>(input_tensor_access->dimension(0)), 1);
      AccessWindowHorizontal output_access(output, 0, 1);
      window_changed = update_window_and_padding(win, input_access, output_access);
      output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
    }
    break;
    case 1:
    case 2:
    case 3:
    {
      AccessWindowHorizontal input_access(input, 0, vector_size);
      AccessWindowHorizontal output_access(output, 0, vector_size);
      window_changed = update_window_and_padding(win, input_access, output_access);
      output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
    }
    break;
    default:
      ARM_COMPUTE_ERROR("Not supported");
  }

  Status err = (window_changed)
                 ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!")
                 : Status{};
  return std::make_tuple(err, win);
}
} // namespace

CLArgMinMaxLayerKernelEx::CLArgMinMaxLayerKernelEx()
  : _input(nullptr), _prev_output(nullptr), _output(nullptr), _reduction_axis(0),
    _op(ReductionOperation::ARG_IDX_MAX)
{
}

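// Validates the arguments, captures the tensor state, and builds the OpenCL
// kernel variant matching the requested axis and operation.
//
// A minimal usage sketch; "input" and "indices" are illustrative CLTensor
// objects, and the CLScheduler call is shown only for context:
//
//   CLArgMinMaxLayerKernelEx kernel;
//   kernel.configure(&input, nullptr, &indices, /* axis = */ 0,
//                    ReductionOperation::ARG_IDX_MAX);
//   CLScheduler::get().enqueue(kernel);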
void CLArgMinMaxLayerKernelEx::configure(const ICLTensor *input, const ICLTensor *prev_output,
                                         ICLTensor *output, unsigned int axis,
                                         ReductionOperation op)
{
  ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
  ARM_COMPUTE_ERROR_THROW_ON(
    validate_arguments(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr,
                       output->info(), axis, op));
  auto win_config = validate_and_configure_window(
    input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis,
    op);
  ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));

  _input = input;
  _prev_output = prev_output;
  _output = output;
  _reduction_axis = axis;
  _op = op;

  // Set build options
  CLBuildOptions build_opts;

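  // -DPREV_OUTPUT selects the multi-pass variant that consumes indices from an
  // earlier pass. DATA_TYPE_SELECT is a signed integer type matching the input
  // element size, presumably used for the kernel-side comparison/select mask.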
  build_opts.add_option_if(_prev_output != nullptr, "-DPREV_OUTPUT");
  build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
  build_opts.add_option_if(is_data_type_float(input->info()->data_type()), "-DFLOAT_DATA_TYPE");
  build_opts.add_option_if_else(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX", "-DARG_MIN");
  build_opts.add_option("-DDATA_TYPE_OUTPUT=" +
                        get_cl_type_from_data_type(output->info()->data_type()));
  build_opts.add_option("-DDATA_TYPE_SELECT=" +
                        get_cl_signed_type_from_element_size(input->info()->element_size()));

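  // Each axis maps to a dedicated kernel variant ("arg_min_max_ex_" + axis
  // name). Axis 0 is a parallel reduction along X, so it also receives an LWS
  // hint derived from the row width.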
  // Create kernel
  cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
  std::string kernel_axis_name;
  switch (axis)
  {
    case 0:
    {
      const ICLTensor *input_for_width = prev_output != nullptr ? _prev_output : _input;
      build_opts.add_option("-DWIDTH=" +
                            support::cpp11::to_string(input_for_width->info()->dimension(0)));

      kernel_axis_name = "x";
      lws_hint = create_lws_hint_parallel_implementations(input_for_width->info()->dimension(0),
                                                          vector_size);
    }
    break;
    case 1:
      build_opts.add_option("-DHEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
      kernel_axis_name = "y";
      break;
    case 2:
      build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
      kernel_axis_name = "z";
      break;
    case 3:
      build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
      build_opts.add_option("-DBATCH=" + support::cpp11::to_string(input->info()->dimension(3)));
      kernel_axis_name = "w";
      break;
    default:
      ARM_COMPUTE_ERROR("Not supported");
  }
  _kernel = static_cast<cl::Kernel>(CLKernelLibraryEx::get().create_kernel(
    "arg_min_max_ex_" + kernel_axis_name, build_opts.options()));

  // Configure kernel window
  ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
}

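// Static validation entry point: runs the same checks as configure() against
// cloned tensor infos, without modifying the caller's tensors.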
Status CLArgMinMaxLayerKernelEx::validate(const ITensorInfo *input, const ITensorInfo *prev_output,
                                          const ITensorInfo *output, unsigned int axis,
                                          ReductionOperation op)
{
  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, prev_output, output, axis, op));
  ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(
    input->clone().get(), (prev_output != nullptr) ? prev_output->clone().get() : nullptr,
    output->clone().get(), axis, op)));
  return Status{};
}

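// Binds the tensors and enqueues the kernel, slicing the execution window in
// 2D, 3D or 4D depending on the axis being reduced.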
void CLArgMinMaxLayerKernelEx::run(const Window &window, cl::CommandQueue &queue)
{
  ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
  ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

  switch (_reduction_axis)
  {
    case 0:
    {
      // Collapse the output window along X: each row yields a single index
      Window out_window(window);
      out_window.set(Window::DimX, Window::Dimension(0, 0, 0));

      // Get first input and output slices
      Window in_slice = window.first_slice_window_2D();
      Window out_slice = out_window.first_slice_window_2D();

      // Number of tensors bound to the kernel: input, optional prev_output, output
      const unsigned int num_tensors = _prev_output != nullptr ? 3 : 2;

      // Reserve local memory for the per-work-group partial results: calling
      // setArg() with a size and a null pointer allocates __local storage
      unsigned int local_res_size = lws_hint()[0] * _output->info()->element_size();
      _kernel.setArg(num_arguments_per_2D_tensor() * num_tensors, local_res_size, nullptr);
      do
      {
        unsigned int idx = 0;
        add_2D_tensor_argument(idx, _input, in_slice);
        if (_prev_output != nullptr)
        {
          add_2D_tensor_argument(idx, _prev_output, in_slice);
        }
        add_2D_tensor_argument(idx, _output, out_slice);
        enqueue(queue, *this, in_slice, lws_hint());
      } while (window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
    }
    break;
    case 1:
    {
      // Get first input and output slices; the Y range is collapsed into a
      // single step so each slice spans the full reduction axis
      Window window_in{window};
      window_in.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1),
                                                    _input->info()->dimension(1)));
      Window in_slice = window_in.first_slice_window_2D();
      Window out_slice = window.first_slice_window_2D();

      do
      {
        unsigned int idx = 0;
        add_2D_tensor_argument(idx, _input, in_slice);
        add_2D_tensor_argument(idx, _output, out_slice);
        enqueue(queue, *this, in_slice, lws_hint());
      } while (window_in.slide_window_slice_2D(in_slice) &&
               window.slide_window_slice_2D(out_slice));
    }
    break;
    case 2:
    {
      // Get first input and output slices; the Z range is collapsed into a
      // single step so each slice spans the full reduction axis
      Window window_in{window};
      window_in.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2),
                                                    _input->info()->dimension(2)));
      Window in_slice = window_in.first_slice_window_3D();
      Window out_slice = window.first_slice_window_3D();

      do
      {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input, in_slice);
        add_3D_tensor_argument(idx, _output, out_slice);
        enqueue(queue, *this, in_slice, lws_hint());
      } while (window_in.slide_window_slice_3D(in_slice) &&
               window.slide_window_slice_3D(out_slice));
    }
    break;
    case 3:
    {
      // Get first input and output slices; dim 3 is fixed to a single step,
      // the kernel itself iterating over the batch (see -DBATCH above)
      Window window_in{window};
      window_in.set(3, Window::Dimension(0, 1, 1));
      Window in_slice = window_in.first_slice_window_4D();
      Window out_slice = window.first_slice_window_4D();

      do
      {
        unsigned int idx = 0;
        add_4D_tensor_argument(idx, _input, in_slice);
        add_4D_tensor_argument(idx, _output, out_slice);
        enqueue(queue, *this, in_slice, lws_hint());
      } while (window_in.slide_window_slice_4D(in_slice) &&
               window.slide_window_slice_4D(out_slice));
    }
    break;
    default:
      ARM_COMPUTE_ERROR("Not supported");
  }
}
} // namespace arm_compute