/*
 * Copyright (c) 2016, 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #ifndef __ARM_COMPUTE_HELPERS_H__
25 #define __ARM_COMPUTE_HELPERS_H__
#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/Steps.h"
#include "arm_compute/core/Strides.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Window.h"

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <utility>
#ifndef DOXYGEN_SKIP_THIS /* Doxygen gets confused by the templates and can't match the implementation to the declaration */
/* C++11 backport of std::make_unique (proposal N3656).
 * _Unique_if maps T to the unique_ptr form matching each overload:
 * single object, array of unknown bound, or array of known bound (deleted). */
template <class T>
struct _Unique_if
{
    typedef std::unique_ptr<T> _Single_object;
};

template <class T>
struct _Unique_if<T[]>
{
    typedef std::unique_ptr<T[]> _Unknown_bound;
};

template <class T, size_t N>
struct _Unique_if<T[N]>
{
    typedef void _Known_bound;
};

/* Single object: perfect-forward the constructor arguments. */
template <class T, class... Args>
typename _Unique_if<T>::_Single_object
make_unique(Args &&... args)
{
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/* Array of unknown bound: value-initialize n elements. */
template <class T>
typename _Unique_if<T>::_Unknown_bound
make_unique(size_t n)
{
    typedef typename std::remove_extent<T>::type U;
    return std::unique_ptr<T>(new U[n]());
}

/* Arrays of known bound are not supported, as in std::make_unique. */
template <class T, class... Args>
typename _Unique_if<T>::_Known_bound
make_unique(Args &&...) = delete;
#endif /* DOXYGEN_SKIP_THIS */
/** Computes bilinear interpolation using the pointer to the top-left pixel and the pixel's distance between
 * the real coordinates and the smallest following integer coordinates.
 *
 * @param[in] pixel_ptr Pointer to the top-left pixel value. Format: Single channel U8
 * @param[in] stride    Stride to access the bottom-left and bottom-right pixel values
 * @param[in] dx        Pixel's distance between the X real coordinate and the smallest X following integer
 * @param[in] dy        Pixel's distance between the Y real coordinate and the smallest Y following integer
 *
 * @note dx and dy must be in the range [0, 1.0]
 *
 * @return The bilinear interpolated pixel value
 */
inline uint8_t delta_bilinear_c1u8(const uint8_t *pixel_ptr, size_t stride, float dx, float dy);

/** Return the pixel at (x,y) using bilinear interpolation. The image must be single channel U8
 *
 * @warning Only works if the iterator was created with an IImage
 *
 * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image.
 * @param[in] stride          Stride in bytes of the image
 * @param[in] x               X position of the wanted pixel
 * @param[in] y               Y position of the wanted pixel
 *
 * @return The pixel at (x, y) using bilinear interpolation.
 */
inline uint8_t pixel_bilinear_c1u8(const uint8_t *first_pixel_ptr, size_t stride, float x, float y);

/** Return the pixel at (x,y) using bilinear interpolation by clamping when out of borders. The image must be single channel U8
 *
 * @warning Only works if the iterator was created with an IImage
 *
 * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image.
 * @param[in] stride          Stride in bytes of the image
 * @param[in] width           Width of the image
 * @param[in] height          Height of the image
 * @param[in] x               X position of the wanted pixel
 * @param[in] y               Y position of the wanted pixel
 *
 * @return The pixel at (x, y) using bilinear interpolation.
 */
inline uint8_t pixel_bilinear_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float x, float y);

/** Return the pixel at (x,y) using area interpolation by clamping when out of borders. The image must be single channel U8
 *
 * @note The interpolation area depends on the width and height ratio of the input and output images
 * @note Currently average of the contributing pixels is calculated
 *
 * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image.
 * @param[in] stride          Stride in bytes of the image
 * @param[in] width           Width of the image
 * @param[in] height          Height of the image
 * @param[in] wr              Width ratio among the input image width and output image width.
 * @param[in] hr              Height ratio among the input image height and output image height.
 * @param[in] x               X position of the wanted pixel
 * @param[in] y               Y position of the wanted pixel
 *
 * @return The pixel at (x, y) using area interpolation.
 */
inline uint8_t pixel_area_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float wr, float hr, int x, int y);
/** Performs clamping among a lower and upper value.
 *
 * @param[in] n     Value to clamp.
 * @param[in] lower Lower threshold.
 * @param[in] upper Upper threshold.
 *
 * @return Clamped value.
 */
template <typename T>
inline T clamp(const T &n, const T &lower, const T &upper)
{
    // Cap at the upper bound first, then enforce the lower bound.
    // Equivalent to std::max(lower, std::min(n, upper)): when lower > upper
    // the lower bound deliberately wins, matching the original behaviour.
    const T &capped_above = std::min(n, upper);
    return std::max(lower, capped_above);
}
/** Base case of for_each. Does nothing. */
template <typename F>
inline void for_each(F &&)
{
}

/** Call the function for each of the arguments
 *
 * @param[in] func Function to be called
 * @param[in] arg  Argument passed to the function
 * @param[in] args Remaining arguments
 */
template <typename F, typename T, typename... Ts>
inline void for_each(F &&func, T &&arg, Ts &&... args)
{
    // Apply to the head, then recurse on the tail; recursion terminates
    // at the zero-argument base case above.
    func(arg);
    for_each(func, args...);
}
/** Base case of foldl.
 *
 * @return value.
 */
template <typename F, typename T>
inline T foldl(F &&, const T &value)
{
    return value;
}

/** Base case of foldl.
 *
 * @return Function evaluation for value1 and value2
 */
template <typename F, typename T, typename U>
inline auto foldl(F &&func, T &&value1, U &&value2) -> decltype(func(value1, value2))
{
    return func(value1, value2);
}

/** Fold left.
 *
 * @param[in] func    Function to be called
 * @param[in] initial Initial value
 * @param[in] value   Argument passed to the function
 * @param[in] values  Remaining arguments
 */
template <typename F, typename I, typename T, typename... Vs>
inline I foldl(F &&func, I &&initial, T &&value, Vs &&... values)
{
    // Combine the accumulator with the next value and recurse on the rest;
    // overload resolution prefers the non-variadic overloads to terminate.
    return foldl(std::forward<F>(func), func(std::forward<I>(initial), std::forward<T>(value)), std::forward<Vs>(values)...);
}
220 namespace arm_compute
222 /** Iterator updated by @ref execute_window_loop for each window element */
226 /** Default constructor to create an empty iterator */
227 constexpr Iterator();
228 /** Create a container iterator for the metadata and allocation contained in the ITensor
230 * @param[in] tensor The tensor to associate to the iterator.
231 * @param[in] window The window which will be used to iterate over the tensor.
233 Iterator(const ITensor *tensor, const Window &window);
235 /** Increment the iterator along the specified dimension of the step value associated to the dimension.
237 * @warning It is the caller's responsibility to call increment(dimension+1) when reaching the end of a dimension, the iterator will not check for overflow.
239 * @note When incrementing a dimension 'n' the coordinates of all the dimensions in the range (0,n-1) are reset. For example if you iterate over a 2D image, everytime you change row (dimension 1), the iterator for the width (dimension 0) is reset to its start.
241 * @param[in] dimension Dimension to increment
243 void increment(size_t dimension);
245 /** Return the offset in bytes from the first element to the current position of the iterator
247 * @return The current position of the iterator in bytes relative to the first element.
249 constexpr int offset() const;
251 /** Return a pointer to the current pixel.
253 * @warning Only works if the iterator was created with an ITensor.
255 * @return equivalent to buffer() + offset()
257 constexpr uint8_t *ptr() const;
259 /** Move the iterator back to the beginning of the specified dimension.
261 * @param[in] dimension Dimension to reset
263 void reset(size_t dimension);
271 constexpr Dimension()
272 : _dim_start(0), _stride(0)
280 std::array<Dimension, Coordinates::num_max_dimensions> _dims;
/** Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
 * It passes the x and y positions to the lambda_function for each iteration
 *
 * @param[in]     w               Window to iterate through.
 * @param[in]     lambda_function The function of type void(function)( const Coordinates & id ) to call at each iteration.
 *                                Where id represents the absolute coordinates of the item to process.
 * @param[in,out] iterators       Tensor iterators which will be updated by this function before calling lambda_function.
 */
template <typename L, typename... Ts>
inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators);
294 /** Update window and padding size for each of the access patterns.
296 * First the window size is reduced based on all access patterns that are not
297 * allowed to modify the padding of the underlying tensor. Then the padding of
298 * the remaining tensors is increased to match the window.
300 * @param[in] win Window that is used by the kernel.
301 * @param[in] patterns Access patterns used to calculate the final window and padding.
303 * @return True if the window has been changed. Changes to the padding do not
304 * influence the returned value.
306 template <typename... Ts>
307 bool update_window_and_padding(Window &win, Ts &&... patterns)
309 bool window_changed = false;
311 for_each([&](const IAccessWindow & w)
313 window_changed |= w.update_window_if_needed(win);
317 bool padding_changed = false;
319 for_each([&](const IAccessWindow & w)
321 padding_changed |= w.update_padding_if_needed(win);
325 return window_changed;
/** Calculate the maximum window for a given tensor shape and border setting
 *
 * @param[in] info        Tensor info object defining the shape of the object for which the window is created.
 * @param[in] steps       (Optional) Number of elements processed for each step.
 * @param[in] skip_border (Optional) If true exclude the border region from the window.
 * @param[in] border_size (Optional) Border size.
 *
 * @return The maximum window the kernel can be executed on.
 */
Window calculate_max_window(const TensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());
/** Calculate the maximum window used by a horizontal kernel for a given tensor shape and border setting
 *
 * @param[in] info        Tensor info object defining the shape of the object for which the window is created.
 * @param[in] steps       (Optional) Number of elements processed for each step.
 * @param[in] skip_border (Optional) If true exclude the border region from the window.
 * @param[in] border_size (Optional) Border size. The border region will be excluded from the window.
 *
 * @return The maximum window the kernel can be executed on.
 */
Window calculate_max_window_horizontal(const TensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());
350 /** Intersect multiple valid regions.
352 * @param[in] regions Valid regions.
354 * @return Intersection of all regions.
356 template <typename... Ts>
357 ValidRegion intersect_valid_regions(Ts &&... regions)
359 auto intersect = [](const ValidRegion & r1, const ValidRegion & r2) -> ValidRegion
363 for(size_t d = 0; d < std::min(r1.anchor.num_dimensions(), r2.anchor.num_dimensions()); ++d)
365 region.anchor.set(d, std::max(r1.anchor[d], r2.anchor[d]));
368 for(size_t d = 0; d < std::min(r1.shape.num_dimensions(), r2.shape.num_dimensions()); ++d)
370 region.shape.set(d, std::min(r1.shape[d], r2.shape[d]));
376 return foldl(intersect, std::forward<Ts>(regions)...);
379 /** Create a strides object based on the provided strides and the tensor dimensions.
381 * @param[in] info Tensor info object providing the shape of the tensor for unspecified strides.
382 * @param[in] stride_x Stride to be used in X dimension (in bytes).
383 * @param[in] fixed_strides Strides to be used in higher dimensions starting at Y (in bytes).
385 * @return Strides object based on the specified strides. Missing strides are
386 * calculated based on the tensor shape and the strides of lower dimensions.
388 template <typename T, typename... Ts>
389 inline Strides compute_strides(const TensorInfo &info, T stride_x, Ts &&... fixed_strides)
391 const TensorShape &shape = info.tensor_shape();
393 // Create strides object
394 Strides strides(stride_x, fixed_strides...);
396 for(size_t i = 1 + sizeof...(Ts); i < info.num_dimensions(); ++i)
398 strides.set(i, shape[i - 1] * strides[i - 1]);
404 /** Create a strides object based on the tensor dimensions.
406 * @param[in] info Tensor info object used to compute the strides.
408 * @return Strides object based on element size and tensor shape.
410 template <typename... Ts>
411 inline Strides compute_strides(const TensorInfo &info)
413 return compute_strides(info, info.element_size());
417 #include "arm_compute/core/Helpers.inl"
418 #endif /*__ARM_COMPUTE_HELPERS_H__ */