/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Copyright (c) 2019-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLArgMinMaxLayerKernelEx.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibraryEx.h"
#include "arm_compute/core/CL/CLValidate.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include "support/StringSupport.h"

#include <string>
#include <tuple>
// Number of elements processed per work-item along the reduced dimension
// (also used as the window step for the non-X reduction kernels).
constexpr unsigned int vector_size = 16;
61 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *prev_output,
62 const ITensorInfo *output, unsigned int axis, ReductionOperation op)
64 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
65 ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
66 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8,
67 DataType::QASYMM8_SIGNED, DataType::S32,
68 DataType::F16, DataType::F32);
69 ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX &&
70 op != ReductionOperation::ARG_IDX_MIN,
71 "Only ARG_IDX_MAX and ARG_IDX_MIN are supported");
72 ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions,
73 "Reduction axis greater than max number of dimensions");
74 ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
76 if (output->total_size() != 0)
78 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32,
81 if (prev_output != nullptr && prev_output->total_size() != 0)
83 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(prev_output, 1, DataType::U32,
84 DataType::S32, DataType::S64);
85 if (output->total_size() != 0)
87 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(prev_output, output);
94 std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input,
95 ITensorInfo *prev_output,
96 ITensorInfo *output, unsigned int axis,
97 ReductionOperation op)
99 ARM_COMPUTE_UNUSED(op);
100 // Output tensor auto initialization if not yet initialized
101 TensorShape output_shape{input->tensor_shape()};
102 output_shape.set(axis, 1);
103 DataType output_data_type = (prev_output != nullptr) ? (prev_output->data_type()) : DataType::S32;
104 auto_init_if_empty(*output, input->clone()
105 ->set_tensor_shape(output_shape)
106 .set_data_type(output_data_type)
108 .set_is_resizable(true));
111 calculate_max_window((prev_output != nullptr) ? (*prev_output) : (*input), Steps(vector_size));
112 bool window_changed = false;
118 ITensorInfo *input_tensor_access = prev_output != nullptr ? prev_output : input;
119 AccessWindowStatic input_access(input_tensor_access, 0, 0,
120 static_cast<int>(input_tensor_access->dimension(0)), 1);
121 AccessWindowHorizontal output_access(output, 0, 1);
122 window_changed = update_window_and_padding(win, input_access, output_access);
123 output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
130 AccessWindowHorizontal input_access(input, 0, vector_size);
131 AccessWindowHorizontal output_access(output, 0, vector_size);
132 window_changed = update_window_and_padding(win, input_access, output_access);
133 output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
137 ARM_COMPUTE_ERROR("Not supported");
140 Status err = (window_changed)
141 ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!")
143 return std::make_tuple(err, win);
147 CLArgMinMaxLayerKernelEx::CLArgMinMaxLayerKernelEx()
148 : _input(nullptr), _prev_output(nullptr), _output(nullptr), _reduction_axis(0),
149 _op(ReductionOperation::ARG_IDX_MAX)
153 void CLArgMinMaxLayerKernelEx::configure(const ICLTensor *input, const ICLTensor *prev_output,
154 ICLTensor *output, unsigned int axis,
155 ReductionOperation op)
157 ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
158 ARM_COMPUTE_ERROR_THROW_ON(
159 validate_arguments(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr,
160 output->info(), axis, op));
161 auto win_config = validate_and_configure_window(
162 input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis,
164 ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
167 _prev_output = prev_output;
169 _reduction_axis = axis;
173 CLBuildOptions build_opts;
175 build_opts.add_option_if(_prev_output != nullptr, "-DPREV_OUTPUT");
176 build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
177 build_opts.add_option_if(is_data_type_float(input->info()->data_type()), "-DFLOAT_DATA_TYPE");
178 build_opts.add_option_if_else(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX", "-DARG_MIN");
179 build_opts.add_option("-DDATA_TYPE_OUTPUT=" +
180 get_cl_type_from_data_type(output->info()->data_type()));
181 build_opts.add_option("-DDATA_TYPE_SELECT=" +
182 get_cl_signed_type_from_element_size(input->info()->element_size()));
185 cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
186 std::string kernel_axis_name;
191 const ICLTensor *input_for_width = prev_output != nullptr ? _prev_output : _input;
192 build_opts.add_option("-DWIDTH=" +
193 support::cpp11::to_string(input_for_width->info()->dimension(0)));
195 kernel_axis_name = "x";
196 lws_hint = create_lws_hint_parallel_implementations(input_for_width->info()->dimension(0),
201 build_opts.add_option("-DHEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
202 kernel_axis_name = "y";
205 build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
206 kernel_axis_name = "z";
209 build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
210 build_opts.add_option("-DBATCH=" + support::cpp11::to_string(input->info()->dimension(3)));
211 kernel_axis_name = "w";
214 ARM_COMPUTE_ERROR("Not supported");
216 _kernel = static_cast<cl::Kernel>(CLKernelLibraryEx::get().create_kernel(
217 "arg_min_max_ex_" + kernel_axis_name, build_opts.options()));
219 // Configure kernel window
220 ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
223 Status CLArgMinMaxLayerKernelEx::validate(const ITensorInfo *input, const ITensorInfo *prev_output,
224 const ITensorInfo *output, unsigned int axis,
225 ReductionOperation op)
227 ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, prev_output, output, axis, op));
228 ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(
229 input->clone().get(), (prev_output != nullptr) ? prev_output->clone().get() : nullptr,
230 output->clone().get(), axis, op)));
234 void CLArgMinMaxLayerKernelEx::run(const Window &window, cl::CommandQueue &queue)
236 ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
237 ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
239 switch (_reduction_axis)
244 Window out_window(window);
245 out_window.set(Window::DimX, Window::Dimension(0, 0, 0));
247 // Get first input and output slices
248 Window in_slice = window.first_slice_window_2D();
249 Window out_slice = out_window.first_slice_window_2D();
252 const unsigned int num_tensors = _prev_output != nullptr ? 3 : 2;
254 // Set local sums buffer
255 unsigned int local_res_size = lws_hint()[0] * _output->info()->element_size();
256 _kernel.setArg(num_arguments_per_2D_tensor() * num_tensors, local_res_size, nullptr);
259 unsigned int idx = 0;
260 add_2D_tensor_argument(idx, _input, in_slice);
261 if (_prev_output != nullptr)
263 add_2D_tensor_argument(idx, _prev_output, in_slice);
265 add_2D_tensor_argument(idx, _output, out_slice);
266 enqueue(queue, *this, in_slice, lws_hint());
267 } while (window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
272 // Get first input and output slices
273 Window window_in{window};
274 window_in.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1),
275 _input->info()->dimension(1)));
276 Window in_slice = window_in.first_slice_window_2D();
277 Window out_slice = window.first_slice_window_2D();
281 unsigned int idx = 0;
282 add_2D_tensor_argument(idx, _input, in_slice);
283 add_2D_tensor_argument(idx, _output, out_slice);
284 enqueue(queue, *this, in_slice, lws_hint());
285 } while (window_in.slide_window_slice_2D(in_slice) &&
286 window.slide_window_slice_2D(out_slice));
291 // Get first input and output slices
292 Window window_in{window};
293 window_in.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2),
294 _input->info()->dimension(2)));
295 Window in_slice = window_in.first_slice_window_3D();
296 Window out_slice = window.first_slice_window_3D();
300 unsigned int idx = 0;
301 add_3D_tensor_argument(idx, _input, in_slice);
302 add_3D_tensor_argument(idx, _output, out_slice);
303 enqueue(queue, *this, in_slice, lws_hint());
304 } while (window_in.slide_window_slice_3D(in_slice) &&
305 window.slide_window_slice_3D(out_slice));
310 // Get first input and output slices
311 Window window_in{window};
312 window_in.set(3, Window::Dimension(0, 1, 1));
313 Window in_slice = window_in.first_slice_window_4D();
314 Window out_slice = window.first_slice_window_4D();
318 unsigned int idx = 0;
319 add_4D_tensor_argument(idx, _input, in_slice);
320 add_4D_tensor_argument(idx, _output, out_slice);
321 enqueue(queue, *this, in_slice, lws_hint());
322 } while (window_in.slide_window_slice_4D(in_slice) &&
323 window.slide_window_slice_4D(out_slice));
327 ARM_COMPUTE_ERROR("Not supported");
330 } // namespace arm_compute