*/
#include "helpers.h"
-
-inline Tensor4D tensor4D_from_vector_no_step(const Vector *vector, int dim_x, int dim_y, int dim_z, int dim_w)
-{
- int stride_x = vector->stride_x;
- int stride_y = stride_x * dim_x;
- int stride_z = stride_y * dim_y;
- int stride_w = stride_z * dim_z;
- Tensor4D tensor =
- {
- .ptr = vector->ptr,
- .offset_first_element_in_bytes = vector->offset_first_element_in_bytes,
- .stride_x = stride_x,
- .stride_y = stride_y,
- .stride_z = stride_z,
- .stride_w = stride_w,
- };
- return tensor;
-}
-
/** Extracts a strided slice up to 4-dimensions
*
* @note Datatype should be given as a preprocessor argument using -DELEMENT_DATA_TYPE=type. e.g. -DELEMENT_DATA_TYPE=short
* @note The size of an element should be given as a preprocessor argument using -DELEMENT_SIZE=size. e.g. -DELEMENT_SIZE=2
+ * @attention Output tensor depth should be given as a preprocessor argument using -DDEPTH_OUT=size. e.g. -DDEPTH_OUT=16
*
- * @param[in] input_ptr Pointer to the first source tensor. Supported data types: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/F16/F32
- * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] dims_in The 4-dimensional dimension of the input. Supported data types: S32
- * @param[in] dims_out The 4-dimensional dimension of the output. Supported data types: S32
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] output_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
 * @param[in] starts The start indices of the slice in each dimension (x, y, z, w). Supported data types: S32
 * @param[in] strides The strides of the slice in each dimension (x, y, z, w). Supported data types: S32
*/
-__kernel void strided_slice(VECTOR_DECLARATION(input),
- VECTOR_DECLARATION(output),
- const int4 dims_in,
- const int4 dims_out,
+__kernel void strided_slice(TENSOR4D_DECLARATION(input),
+ TENSOR4D_DECLARATION(output),
const int4 starts,
const int4 strides)
{
- // TODO: Should be change to CONVERT_TO_TENSOR4D_STRUCT in order to reduce inference of the offset
- Vector vec_out = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output);
- Vector vec_in = CONVERT_TO_VECTOR_STRUCT_NO_STEP(input);
-
- // Implemenation
- // Infer a Tensor4D from output Vector and output's dimensions info
- // Infer a Tensor4D from input Vector and input's dimensions info
- // Infer indices of output as 4D from the offset of output vector
- // Infer indices of input as 4D from indices of output
- // out(offset of output vector) = in(offset of input)
-
- Tensor4D tensor_out = tensor4D_from_vector_no_step(&vec_out, dims_out.x, dims_out.y, dims_out.z, dims_out.w);
- Tensor4D tensor_in = tensor4D_from_vector_no_step(&vec_in, dims_in.x, dims_in.y, dims_in.z, dims_in.w);
-
- // Must be output_step_x == output_stride_x == an element's size
- const int offset_out = get_global_id(0) * output_stride_x;
- int4 indices_out =
- {
- get_global_id(0) % dims_out.x,
- (offset_out / tensor_out.stride_y) % dims_out.y,
- (offset_out / tensor_out.stride_z) % dims_out.z,
- (offset_out / tensor_out.stride_w) % dims_out.w,
- };
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT(input, 0);
+ Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT(output, DEPTH_OUT);
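+ // Each work-item copies a single element. Its output indices are:
+ //   x = get_global_id(0), y = get_global_id(1),
+ //   z = get_global_id(2) % DEPTH_OUT, w = get_global_id(2) / DEPTH_OUT,
+ // and the matching input coordinate in every dimension is start + stride * output_index.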
int4 indices_in =
{
- starts.x + (strides.x * indices_out.x),
- starts.y + (strides.y * indices_out.y),
- starts.z + (strides.z * indices_out.z),
- starts.w + (strides.w * indices_out.w),
+ starts.x + (strides.x * get_global_id(0)),
+ starts.y + (strides.y * get_global_id(1)),
+ starts.z + (strides.z * (get_global_id(2) % DEPTH_OUT)),
+ starts.w + (strides.w * (get_global_id(2) / DEPTH_OUT)),
};
-
- *((__global ELEMENT_DATA_TYPE *)vector_offset(&vec_out, get_global_id(0))) = *((__global ELEMENT_DATA_TYPE *)tensor4D_offset(&tensor_in, indices_in.x, indices_in.y, indices_in.z, indices_in.w));
+ *((__global ELEMENT_DATA_TYPE *)out.ptr) = *((__global ELEMENT_DATA_TYPE *)tensor4D_offset(&in, indices_in.x, indices_in.y, indices_in.z, indices_in.w));
}
_endMask = endMask;
_shrinkAxisMask = shrinkAxisMask;
- constexpr unsigned int num_elems_processed_per_iteration = 1;
-
// Set kernel build options
std::set<std::string> build_opts;
build_opts.emplace("-DELEMENT_DATA_TYPE=" +
get_cl_type_from_data_type(input->info()->data_type()));
build_opts.emplace("-DELEMENT_SIZE=" + support::cpp11::to_string(input->info()->element_size()));
+ build_opts.emplace("-DDEPTH_OUT=" + support::cpp11::to_string(output->info()->dimension(2)));
// Create kernel
_kernel =
static_cast<cl::Kernel>(CLKernelLibraryEx::get().create_kernel("strided_slice", build_opts));
- // Create output's window without padding
- TensorShape collapsed = output->info()->tensor_shape();
- collapsed.collapse(4);
- TensorInfo info = *output->info();
- info.set_tensor_shape(collapsed);
- Window win = calculate_max_window(info, Steps(num_elems_processed_per_iteration));
-
+ // Configure kernel window
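+ // Default Steps() gives one output element per work-item, so the window needs no padding or border handling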
+ Window win = calculate_max_window(*output->info(), Steps());
ICLKernel::configure(win);
}
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
- // Create input window
- TensorShape collapsed = _input->info()->tensor_shape();
- collapsed.collapse(4);
- TensorInfo info = *_input->info();
- info.set_tensor_shape(collapsed);
- Window win_in = calculate_max_window(info, Steps(_input->info()->tensor_shape().total_size()));
-
_beginData->map(queue);
_endData->map(queue);
_stridesData->map(queue);
- std::vector<int32_t> dimsIn;
- std::vector<int32_t> dimsOut;
std::vector<int32_t> starts;
- std::vector<int32_t> stops;
std::vector<int32_t> strides;
for (uint32_t n = 0; n < _beginData->info()->tensor_shape().total_size(); ++n)
  {
    starts.emplace_back(StartForAxis(_beginMask, reinterpret_cast<int32_t *>(_beginData->buffer())[n],
                                     reinterpret_cast<int32_t *>(_stridesData->buffer())[n], shape, n));
- stops.emplace_back(StopForAxis(_endMask, reinterpret_cast<int32_t *>(_endData->buffer())[n],
- reinterpret_cast<int32_t *>(_stridesData->buffer())[n], shape,
- n));
-
strides.emplace_back(reinterpret_cast<int32_t *>(_stridesData->buffer())[n]);
- dimsIn.emplace_back(shape[n]);
- dimsOut.emplace_back(getOutDim(starts[n], stops[n], strides[n]));
}
for (uint32_t n = _beginData->info()->tensor_shape().total_size(); n < 4; n++)
{
starts.emplace_back(0);
- stops.emplace_back(1);
strides.emplace_back(1);
- dimsIn.emplace_back(1);
- dimsOut.emplace_back(1);
}
// TODO: Apply shrinkAxisMask
_stridesData->unmap(queue);
_endData->unmap(queue);
- // Set parameters
- unsigned int idx = 2 * num_arguments_per_1D_tensor(); // Skip the input and output parameters
- const cl_int4 dimsInArg = {{
- static_cast<cl_int>(dimsIn[0]), static_cast<cl_int>(dimsIn[1]),
- static_cast<cl_int>(dimsIn[2]), static_cast<cl_int>(dimsIn[3]),
- }};
- _kernel.setArg<cl_int4>(idx++, dimsInArg);
-
- const cl_int4 dimsOutArg = {{
- static_cast<cl_int>(dimsOut[0]), static_cast<cl_int>(dimsOut[1]),
- static_cast<cl_int>(dimsOut[2]), static_cast<cl_int>(dimsOut[3]),
- }};
- _kernel.setArg<cl_int4>(idx++, dimsOutArg);
-
+ unsigned int idx = 2 * num_arguments_per_4D_tensor(); // Skip the input and output parameters
const cl_int4 startsArg = {{
static_cast<cl_int>(starts[0]), static_cast<cl_int>(starts[1]),
static_cast<cl_int>(starts[2]), static_cast<cl_int>(starts[3]),
}};
 _kernel.setArg<cl_int4>(idx++, startsArg);

 const cl_int4 stridesArg = {{
     static_cast<cl_int>(strides[0]), static_cast<cl_int>(strides[1]),
     static_cast<cl_int>(strides[2]), static_cast<cl_int>(strides[3]),
 }};
 _kernel.setArg<cl_int4>(idx++, stridesArg);
- // TODO: Apply slicing output's window
- idx = 0;
- add_1D_tensor_argument(idx, _input, win_in);
- add_1D_tensor_argument(idx, _output, window);
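+ // Collapse Z and W of the output window into a single dimension; the kernel unpacks them again using DEPTH_OUT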
+ Window slice_out = window.first_slice_window_4D().collapse(ICLKernel::window(), 2, 4);
- enqueue(queue, *this, window);
+ // Setup input slice; its dimensions are zeroed because the kernel computes the input offsets from starts and strides
+ Window slice_in(slice_out);
+ slice_in.set(Window::DimX, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimY, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
+ slice_in.set(3, Window::Dimension(0, 0, 0));
+
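+ // Enqueue one run per collapsed 4D output slice; the input slice passes only strides and the base offset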
+ do
+ {
+ unsigned int idx = 0;
+ add_4D_tensor_argument(idx, _input, slice_in);
+ add_4D_tensor_argument(idx, _output, slice_out);
+ enqueue(queue, *this, slice_out);
+ } while (window.slide_window_slice_4D(slice_in) && window.slide_window_slice_4D(slice_out));
}