/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2019-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLArgMinMaxLayerKernelEx.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibraryEx.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include "support/StringSupport.h"
namespace arm_compute
{
namespace
{
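// Step size of the execution window along the X dimension; this matches the
// vector width the OpenCL kernels operate on.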
constexpr unsigned int vector_size = 16;

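// Checks that the tensors and the reduction parameters form a valid kernel
// configuration: only ARG_IDX_MAX/ARG_IDX_MIN are accepted, the axis must be
// in [0, 3], and the (optional) previous-output and output tensors must hold
// an index data type (U32/S32/S64).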
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *prev_output,
                          const ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
  ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
  ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::S32,
                                                       DataType::F16, DataType::F32);
  ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX &&
                                      op != ReductionOperation::ARG_IDX_MIN,
                                  "Only ARG_IDX_MAX and ARG_IDX_MIN are supported");
  ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions,
                                  "Reduction axis greater than or equal to the max number of dimensions");
  ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");

  if (output->total_size() != 0)
  {
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32,
                                                         DataType::S64);
  }
  if (prev_output != nullptr && prev_output->total_size() != 0)
  {
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(prev_output, 1, DataType::U32,
                                                         DataType::S32, DataType::S64);
    if (output->total_size() != 0)
    {
      ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(prev_output, output);
    }
  }

  return Status{};
}

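// Auto-initializes the output tensor (input shape with the reduced axis
// collapsed to 1, index data type) if it is still empty, then computes the
// execution window and the padding requirements for the given reduction axis.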
std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input,
                                                         ITensorInfo *prev_output,
                                                         ITensorInfo *output, unsigned int axis,
                                                         ReductionOperation op)
{
  ARM_COMPUTE_UNUSED(op);
  // Output tensor auto initialization if not yet initialized
  TensorShape output_shape{input->tensor_shape()};
  output_shape.set(axis, 1);
  DataType output_data_type = (prev_output != nullptr) ? (prev_output->data_type()) : DataType::S32;
  auto_init_if_empty(*output, input->clone()
                                  ->set_tensor_shape(output_shape)
                                  .set_data_type(output_data_type)
                                  .reset_padding()
                                  .set_is_resizable(true));

  Window win = calculate_max_window((prev_output != nullptr) ? (*prev_output) : (*input),
                                    Steps(vector_size));
  bool window_changed = false;

  // Set up the access windows and padding depending on the reduction axis
  switch (axis)
  {
    case 0:
    {
      ITensorInfo *input_tensor_access = prev_output != nullptr ? prev_output : input;
      AccessWindowStatic input_access(input_tensor_access, 0, 0,
                                      static_cast<int>(input_tensor_access->dimension(0)), 1);
      AccessWindowHorizontal output_access(output, 0, 1);
      window_changed = update_window_and_padding(win, input_access, output_access);
      output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
    }
    break;
    case 1:
    case 2:
    case 3:
    {
      AccessWindowHorizontal input_access(input, 0, vector_size);
      AccessWindowHorizontal output_access(output, 0, vector_size);
      window_changed = update_window_and_padding(win, input_access, output_access);
      output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
    }
    break;
    default:
      ARM_COMPUTE_ERROR("Not supported");
  }

  Status err = (window_changed)
                   ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!")
                   : Status{};
  return std::make_tuple(err, win);
}
} // namespace

CLArgMinMaxLayerKernelEx::CLArgMinMaxLayerKernelEx()
    : _input(nullptr), _prev_output(nullptr), _output(nullptr), _reduction_axis(0),
      _op(ReductionOperation::ARG_IDX_MAX)
{
}

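// Validates the arguments, selects the OpenCL kernel variant for the requested
// axis ("x", "y", "z" or "w") and forwards the relevant tensor dimensions to
// the kernel as compile-time build options.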
void CLArgMinMaxLayerKernelEx::configure(const ICLTensor *input, const ICLTensor *prev_output,
                                         ICLTensor *output, unsigned int axis,
                                         ReductionOperation op)
{
  ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
  ARM_COMPUTE_ERROR_THROW_ON(
      validate_arguments(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr,
                         output->info(), axis, op));
  auto win_config = validate_and_configure_window(
      input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis,
      op);
  ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));

  _input = input;
  _prev_output = prev_output;
  _output = output;
  _reduction_axis = axis;
  _op = op;

  // Set build options
  CLBuildOptions build_opts;

  build_opts.add_option_if(_prev_output != nullptr, "-DPREV_OUTPUT");
  build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
  build_opts.add_option_if(is_data_type_float(input->info()->data_type()), "-DFLOAT_DATA_TYPE");
  build_opts.add_option_if_else(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX", "-DARG_MIN");
  build_opts.add_option("-DDATA_TYPE_OUTPUT=" +
                        get_cl_type_from_data_type(output->info()->data_type()));
  build_opts.add_option("-DDATA_TYPE_SELECT=" +
                        get_cl_signed_type_from_element_size(input->info()->element_size()));

  // Create kernel
  cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
  std::string kernel_axis_name;
  switch (axis)
  {
    case 0:
    {
      const ICLTensor *input_for_width = _prev_output != nullptr ? _prev_output : _input;
      build_opts.add_option("-DWIDTH=" +
                            support::cpp11::to_string(input_for_width->info()->dimension(0)));

      kernel_axis_name = "x";
      lws_hint = create_lws_hint_parallel_implementations(input_for_width->info()->dimension(0),
                                                          vector_size);
    }
    break;
    case 1:
      build_opts.add_option("-DHEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
      kernel_axis_name = "y";
      break;
    case 2:
      build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
      kernel_axis_name = "z";
      break;
    case 3:
      // The w-axis kernel needs the depth as well as the batch size to step
      // through the fourth dimension
      build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
      build_opts.add_option("-DBATCH=" + support::cpp11::to_string(input->info()->dimension(3)));
      kernel_axis_name = "w";
      break;
    default:
      ARM_COMPUTE_ERROR("Not supported");
  }
  _kernel = static_cast<cl::Kernel>(CLKernelLibraryEx::get().create_kernel(
      "arg_min_max_ex_" + kernel_axis_name, build_opts.options()));

  // Configure kernel window
  ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
}

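// Static validation entry point: performs the same checks as configure() on
// cloned tensor infos so the caller's tensors are left untouched.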
Status CLArgMinMaxLayerKernelEx::validate(const ITensorInfo *input, const ITensorInfo *prev_output,
                                          const ITensorInfo *output, unsigned int axis,
                                          ReductionOperation op)
{
  ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, prev_output, output, axis, op));
  ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(
      input->clone().get(), (prev_output != nullptr) ? prev_output->clone().get() : nullptr,
      output->clone().get(), axis, op)));
  return Status{};
}

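// Enqueues the kernel. The execution window is sliced according to the
// reduction axis: 2D slices for the x and y axes, 3D slices for the z axis
// and 4D slices for the w axis.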
void CLArgMinMaxLayerKernelEx::run(const Window &window, cl::CommandQueue &queue)
{
  ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
  ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

  switch (_reduction_axis)
  {
    case 0:
    {
      // Set out window
      Window out_window(window);
      out_window.set(Window::DimX, Window::Dimension(0, 0, 0));

      // Get first input and output slices
      Window in_slice = window.first_slice_window_2D();
      Window out_slice = out_window.first_slice_window_2D();

      // Number of 2D tensor arguments passed to the kernel
      const unsigned int num_tensors = _prev_output != nullptr ? 3 : 2;

      // Set the local results buffer used for the per-work-group reduction
      unsigned int local_res_size = lws_hint()[0] * _output->info()->element_size();
      _kernel.setArg(num_arguments_per_2D_tensor() * num_tensors, local_res_size, nullptr);
      do
      {
        unsigned int idx = 0;
        add_2D_tensor_argument(idx, _input, in_slice);
        if (_prev_output != nullptr)
        {
          add_2D_tensor_argument(idx, _prev_output, in_slice);
        }
        add_2D_tensor_argument(idx, _output, out_slice);
        enqueue(queue, *this, in_slice, lws_hint());
      } while (window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
    }
    break;
    case 1:
    {
      // Get first input and output slices
      Window window_in{window};
      window_in.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1),
                                                    _input->info()->dimension(1)));
      Window in_slice = window_in.first_slice_window_2D();
      Window out_slice = window.first_slice_window_2D();

      do
      {
        unsigned int idx = 0;
        add_2D_tensor_argument(idx, _input, in_slice);
        add_2D_tensor_argument(idx, _output, out_slice);
        enqueue(queue, *this, in_slice, lws_hint());
      } while (window_in.slide_window_slice_2D(in_slice) &&
               window.slide_window_slice_2D(out_slice));
    }
    break;
    case 2:
    {
      // Get first input and output slices
      Window window_in{window};
      window_in.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2),
                                                    _input->info()->dimension(2)));
      Window in_slice = window_in.first_slice_window_3D();
      Window out_slice = window.first_slice_window_3D();

      do
      {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input, in_slice);
        add_3D_tensor_argument(idx, _output, out_slice);
        enqueue(queue, *this, in_slice, lws_hint());
      } while (window_in.slide_window_slice_3D(in_slice) &&
               window.slide_window_slice_3D(out_slice));
    }
    break;
    case 3:
    {
      // Get first input and output slices
      Window window_in{window};
      window_in.set(3, Window::Dimension(0, 1, 1));
      Window in_slice = window_in.first_slice_window_4D();
      Window out_slice = window.first_slice_window_4D();

      do
      {
        unsigned int idx = 0;
        add_4D_tensor_argument(idx, _input, in_slice);
        add_4D_tensor_argument(idx, _output, out_slice);
        enqueue(queue, *this, in_slice, lws_hint());
      } while (window_in.slide_window_slice_4D(in_slice) &&
               window.slide_window_slice_4D(out_slice));
    }
    break;
    default:
      ARM_COMPUTE_ERROR("Not supported");
  }
}
} // namespace arm_compute