From 0a5fb5bbf07d5d306d94aa226a7172a97f932441 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Prasanna=20R/SNAP=20/SRI-Bangalore/Engineer/=EC=82=BC?=
 =?utf8?q?=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Tue, 18 Dec 2018 08:15:04 +0530
Subject: [PATCH] Remove CL implementation of Equal & Not Equal (#4069)

Since CLComparisonOp, a unified CL implementation of the comparison ops,
has been merged, the redundant Equal and NotEqual files can be removed.

Related issue: #3851

Signed-off-by: prasannar
---
 .../arm_compute/core/CL/kernels/CLEqualKernel.h    |  59 ------
 .../arm_compute/core/CL/kernels/CLNotEqualKernel.h |  64 -------
 .../arm_compute/runtime/CL/functions/CLEqual.h     |  40 -----
 .../arm_compute/runtime/CL/functions/CLNotEqual.h  |  41 -----
 libs/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp  |  20 ---
 libs/ARMComputeEx/src/core/CL/cl_kernels/equal.cl  |  65 ------
 .../src/core/CL/cl_kernels/equal_quantized.cl      |  73 --------
 .../src/core/CL/cl_kernels/notequal.cl             |  65 ------
 .../src/core/CL/cl_kernels/notequal_quantized.cl   |  73 --------
 .../src/core/CL/kernels/CLEqualKernel.cpp          | 192 ---------------------
 .../src/core/CL/kernels/CLNotEqualKernel.cpp       | 191 --------------------
 .../src/runtime/CL/functions/CLEqual.cpp           |  39 -----
 .../src/runtime/CL/functions/CLNotEqual.cpp        |  40 -----
 13 files changed, 962 deletions(-)
 delete mode 100644 libs/ARMComputeEx/arm_compute/core/CL/kernels/CLEqualKernel.h
 delete mode 100644 libs/ARMComputeEx/arm_compute/core/CL/kernels/CLNotEqualKernel.h
 delete mode 100644 libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLEqual.h
 delete mode 100644 libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLNotEqual.h
 delete mode 100644 libs/ARMComputeEx/src/core/CL/cl_kernels/equal.cl
 delete mode 100644 libs/ARMComputeEx/src/core/CL/cl_kernels/equal_quantized.cl
 delete mode 100644 libs/ARMComputeEx/src/core/CL/cl_kernels/notequal.cl
 delete mode 100644 libs/ARMComputeEx/src/core/CL/cl_kernels/notequal_quantized.cl
 delete mode 100644 libs/ARMComputeEx/src/core/CL/kernels/CLEqualKernel.cpp
 delete mode 100644 libs/ARMComputeEx/src/core/CL/kernels/CLNotEqualKernel.cpp
 delete mode 100644 libs/ARMComputeEx/src/runtime/CL/functions/CLEqual.cpp
 delete mode 100644 libs/ARMComputeEx/src/runtime/CL/functions/CLNotEqual.cpp

diff --git a/libs/ARMComputeEx/arm_compute/core/CL/kernels/CLEqualKernel.h b/libs/ARMComputeEx/arm_compute/core/CL/kernels/CLEqualKernel.h
deleted file mode 100644
index f6b6623..0000000
--- a/libs/ARMComputeEx/arm_compute/core/CL/kernels/CLEqualKernel.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- * Copyright (c) 2016-2018 ARM Limited.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -#ifndef __ARM_COMPUTE_CLEQUALKERNEL_H__ -#define __ARM_COMPUTE_CLEQUALKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ICLTensor; - -/** OpenCL kernel to check if values in both tensors are equal*/ -class CLEqualKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLEqualKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers). */ - CLEqualKernel(const CLEqualKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers). */ - CLEqualKernel &operator=(const CLEqualKernel &) = delete; - /** Allow instances of this class to be moved */ - CLEqualKernel(CLEqualKernel &&) = default; - /** Allow instances of this class to be moved */ - CLEqualKernel &operator=(CLEqualKernel &&) = default; - /** Initialize the kernel's input, output. - * - * @param[in] input1 Source tensor1. - * @param[in] input2 Source tensor2. - * @param[out] output Output tensor. - */ - void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - - BorderSize border_size() const override; - -private: - const ICLTensor *_input1; - const ICLTensor *_input2; - ICLTensor *_output; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLEQUALKERNEL_H__ */ diff --git a/libs/ARMComputeEx/arm_compute/core/CL/kernels/CLNotEqualKernel.h b/libs/ARMComputeEx/arm_compute/core/CL/kernels/CLNotEqualKernel.h deleted file mode 100644 index 383dee8..0000000 --- a/libs/ARMComputeEx/arm_compute/core/CL/kernels/CLNotEqualKernel.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef __ARM_COMPUTE_CLNOTEQUALKERNEL_H__ -#define __ARM_COMPUTE_CLNOTEQUALKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ICLTensor; - -/** OpenCL kernel to check if values in both tensors are not equal*/ -class CLNotEqualKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLNotEqualKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers). */ - CLNotEqualKernel(const CLNotEqualKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers). */ - CLNotEqualKernel &operator=(const CLNotEqualKernel &) = delete; - /** Allow instances of this class to be moved */ - CLNotEqualKernel(CLNotEqualKernel &&) = default; - /** Allow instances of this class to be moved */ - CLNotEqualKernel &operator=(CLNotEqualKernel &&) = default; - /** Initialize the kernel's input, output. - * - * @param[in] input1 Source tensor 1. - Data types supported: U8/S8/U16/S16/U32/S32/F16/F32 - * @param[in] input2 Source tensor 2. 
- Data types supported: U8/S8/U16/S16/U32/S32/F16/F32 - * @param[out] output Output tensor. - A boolean tensor indicating the truth value of non-equality of input - tensors. - Data types supported: U8. - */ - void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - - BorderSize border_size() const override; - -private: - const ICLTensor *_input1; - const ICLTensor *_input2; - ICLTensor *_output; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLNOTEQUALKERNEL_H__ */ diff --git a/libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLEqual.h b/libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLEqual.h deleted file mode 100644 index 1f772d1..0000000 --- a/libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLEqual.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef __ARM_COMPUTE_CLEQUAL_H__ -#define __ARM_COMPUTE_CLEQUAL_H__ - -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -class CLEqual : public ICLSimpleFunction -{ -public: - /** Initialise the function's source and destination. - * - * @param[in] input1 Source tensor1. Data types supported: - * U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32. - * @param[in] input2 Source tensor2. Data types supported: - * U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32. - * @param[out] output Output tensor. Data types supported: Same as @p input. - */ - void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output); -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLEQUAL_H__ */ diff --git a/libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLNotEqual.h b/libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLNotEqual.h deleted file mode 100644 index 019db54..0000000 --- a/libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLNotEqual.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef __ARM_COMPUTE_CLNOT_EQUAL_H__ -#define __ARM_COMPUTE_CLNOT_EQUAL_H__ - -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" - -namespace arm_compute -{ -class ICLTensor; - -class CLNotEqual : public ICLSimpleFunction -{ -public: - /** Initialise the function's source and destination. - * - * @param[in] input1 Source tensor1. Data types supported: - * U8/S8/U16/S16/U32/S32/F16/F32. - * @param[in] input2 Source tensor2. Data types supported: - * U8/S8/U16/S16/U32/S32/F16/F32. - * @param[out] output Output tensor. Data types supported: - * Data types supported: U8. - */ - void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output); -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLNOT_EQUAL_H__ */ diff --git a/libs/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp b/libs/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp index 3007ee1..6326ee1 100644 --- a/libs/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp +++ b/libs/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp @@ -122,8 +122,6 @@ const std::map CLKernelLibraryEx::_kernel_program_map {"direct_convolution5x5_f32_bifrost", "direct_convolution5x5.cl"}, {"direct_convolution_1x1_3x3_5x5_quantized", "direct_convolution_1x1_3x3_5x5_quantized.cl"}, {"embedding_lookup", "embedding_lookup.cl"}, - {"equal", "equal.cl"}, - {"equal_quantized", "equal_quantized.cl"}, {"erode", "erode.cl"}, {"exp_layer", "exp.cl"}, {"fast_corners", "fast_corners.cl"}, @@ -215,8 +213,6 @@ const std::map CLKernelLibraryEx::_kernel_program_map {"non_max_suppression", "nonmax.cl"}, {"normalization_layer_cross_map", "normalization_layer.cl"}, {"normalization_layer_in_map", "normalization_layer.cl"}, - {"notequal", "notequal.cl"}, - {"notequal_quantized", "notequal_quantized.cl"}, {"NV12_to_IYUV_bt709", "color_convert.cl"}, {"NV12_to_RGB888_bt709", "color_convert.cl"}, {"NV12_to_RGBA8888_bt709", "color_convert.cl"}, @@ -356,14 +352,6 @@ const std::map CLKernelLibraryEx::_program_source_map #include "./cl_kernels/embedding_lookup.clembed" }, { - "equal.cl", -#include "./cl_kernels/equal.clembed" - }, - { - "equal_quantized.cl", -#include "./cl_kernels/equal_quantized.clembed" - }, - { "depth_to_space.cl", #include "./cl_kernels/depth_to_space.clembed" }, @@ -400,14 +388,6 @@ const std::map CLKernelLibraryEx::_program_source_map #include "./cl_kernels/neg_tensor.clembed" }, { - "notequal.cl", -#include "./cl_kernels/notequal.clembed" - }, - { - "notequal_quantized.cl", -#include "./cl_kernels/notequal_quantized.clembed" - }, - { "pad.cl", #include "./cl_kernels/pad.clembed" }, diff --git a/libs/ARMComputeEx/src/core/CL/cl_kernels/equal.cl b/libs/ARMComputeEx/src/core/CL/cl_kernels/equal.cl deleted file mode 100644 index 74bbd6c..0000000 --- a/libs/ARMComputeEx/src/core/CL/cl_kernels/equal.cl +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "helpers.h" - -/** Checks if values in both tensors are equal. - * - * @attention The input and output data types need to be passed at compile time using -DDATA_TYPE_IN, -DDATA_TYPE_OUT, - * e.g. -DDATA_TYPE_IN=float, -DDATA_TYPE_OUT = uchar - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 - * - * @param[in] input1_ptr Pointer to the source tensor. Supported data types: U8/S8/U16/S16/F16/U32/S32/F32 - * @param[in] input1_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] input1_step_x input1_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] input1_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] input1_step_y input1_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] input1_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] input1_step_z input1_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] input1_offset_first_element_in_bytes The offset of the first element in the source tensor - * - * @param[in] input2_ptr Pointer to the source tensor. Supported data types: U8/S8/U16/S16/F16/U32/S32/F32 - * @param[in] input2_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] input2_step_x input2_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] input2_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] input2_step_y input2_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] input2_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] input2_step_z input2_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] input2_offset_first_element_in_bytes The offset of the first element in the source tensor - * - * @param[out] output_ptr Pointer to the destination tensor. Supported data types: QASYMM8 - * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image - */ -__kernel void equal( - TENSOR3D_DECLARATION(input1), - TENSOR3D_DECLARATION(input2), - TENSOR3D_DECLARATION(output)) -{ - Tensor3D input1 = CONVERT_TO_TENSOR3D_STRUCT(input1); - Tensor3D input2 = CONVERT_TO_TENSOR3D_STRUCT(input2); - Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); - - VSTORE(VEC_SIZE) - (CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input1.ptr) == VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input2.ptr) ? 
1 : 0, - VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)), - 0, (__global DATA_TYPE_OUT *)output.ptr); -} diff --git a/libs/ARMComputeEx/src/core/CL/cl_kernels/equal_quantized.cl b/libs/ARMComputeEx/src/core/CL/cl_kernels/equal_quantized.cl deleted file mode 100644 index a29398e..0000000 --- a/libs/ARMComputeEx/src/core/CL/cl_kernels/equal_quantized.cl +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "helpers.h" -#define SUB(x, y) (x) - (y) - -/** Checks if values in both tensors are equal. - * @attention Offset and Scale of both input should be given as a preprocessor argument using -DOFFSET_IN1=int, -DOFFSET_IN2=int, -DSCALE_IN1=float and -DSCALE_IN2=float. e.g. -DOFFSET_IN1=1, -DOFFSET_IN2=0, -DSCALE_IN1=0.5, -DSCALE_IN2=0.5 - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 - * - * @param[in] input1_ptr Pointer to the source tensor. Supported data types: QASYMM8 - * @param[in] input1_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] input1_step_x input1_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] input1_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] input1_step_y input1_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] input1_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] input1_step_z input1_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] input1_offset_first_element_in_bytes The offset of the first element in the source tensor - * - * @param[in] input2_ptr Pointer to the source tensor. Supported data types: QASYMM8 - * @param[in] input2_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] input2_step_x input2_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] input2_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] input2_step_y input2_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] input2_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] input2_step_z input2_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] input2_offset_first_element_in_bytes The offset of the first element in the source tensor - * - * @param[out] output_ptr Pointer to the destination tensor. 
Supported data types: QASYMM8 - * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor - */ -__kernel void equal_quantized( - TENSOR3D_DECLARATION(in1), - TENSOR3D_DECLARATION(in2), - TENSOR3D_DECLARATION(out)) -{ - // Get pixels pointer - Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1); - Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2); - Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out); - - int16 in_a = CONVERT(vload16(0, (__global uchar *)in1.ptr), int16); - int16 in_b = CONVERT(vload16(0, (__global uchar *)in2.ptr), int16); - - in_a = SUB(in_a, (int16)((int)OFFSET_IN1)); - in_b = SUB(in_b, (int16)((int)OFFSET_IN2)); - - const float16 in1f32 = convert_float16(in_a) * (float16)((float)SCALE_IN1); - const float16 in2f32 = convert_float16(in_b) * (float16)((float)SCALE_IN2); - const float16 qresf32 = convert_float16((in1f32 == in2f32) ? 1 : 0); - const uchar16 res = convert_uchar16_sat(convert_int16_rte(qresf32)); - // Store result - vstore16(res, 0, (__global uchar *)out.ptr); -} diff --git a/libs/ARMComputeEx/src/core/CL/cl_kernels/notequal.cl b/libs/ARMComputeEx/src/core/CL/cl_kernels/notequal.cl deleted file mode 100644 index 49ab39d..0000000 --- a/libs/ARMComputeEx/src/core/CL/cl_kernels/notequal.cl +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "helpers.h" - -/** Checks if values in both tensors are not equal. - * - * @attention The input and output data types need to be passed at compile time using -DDATA_TYPE_IN, -DDATA_TYPE_OUT, - * e.g. -DDATA_TYPE_IN=float, -DDATA_TYPE_OUT = uchar - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 - * - * @param[in] input1_ptr Pointer to the source tensor. 
Supported data types: U8/S8/U16/S16/F16/U32/S32/F32 - * @param[in] input1_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] input1_step_x input1_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] input1_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] input1_step_y input1_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] input1_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] input1_step_z input1_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] input1_offset_first_element_in_bytes The offset of the first element in the source tensor - * - * @param[in] input2_ptr Pointer to the source tensor. Supported data types: U8/S8/U16/S16/F16/U32/S32/F32 - * @param[in] input2_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] input2_step_x input2_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] input2_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] input2_step_y input2_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] input2_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] input2_step_z input2_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] input2_offset_first_element_in_bytes The offset of the first element in the source tensor - * - * @param[out] output_ptr Pointer to the destination tensor. Supported data types: QASYMM8 - * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image - */ -__kernel void notequal( - TENSOR3D_DECLARATION(input1), - TENSOR3D_DECLARATION(input2), - TENSOR3D_DECLARATION(output)) -{ - Tensor3D input1 = CONVERT_TO_TENSOR3D_STRUCT(input1); - Tensor3D input2 = CONVERT_TO_TENSOR3D_STRUCT(input2); - Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); - - VSTORE(VEC_SIZE) - (CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input1.ptr) != VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input2.ptr) ? 1 : 0, - VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)), - 0, (__global DATA_TYPE_OUT *)output.ptr); -} diff --git a/libs/ARMComputeEx/src/core/CL/cl_kernels/notequal_quantized.cl b/libs/ARMComputeEx/src/core/CL/cl_kernels/notequal_quantized.cl deleted file mode 100644 index a4a7026..0000000 --- a/libs/ARMComputeEx/src/core/CL/cl_kernels/notequal_quantized.cl +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "helpers.h" -#define SUB(x, y) (x) - (y) - -/** Checks if values in both tensors are not equal. - * @attention Offset and Scale of both input should be given as a preprocessor argument using -DOFFSET_IN1=int, -DOFFSET_IN2=int, -DSCALE_IN1=float and -DSCALE_IN2=float. e.g. -DOFFSET_IN1=1, -DOFFSET_IN2=0, -DSCALE_IN1=0.5, -DSCALE_IN2=0.5 - * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 - * - * @param[in] input1_ptr Pointer to the source tensor. Supported data types: QASYMM8 - * @param[in] input1_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] input1_step_x input1_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] input1_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] input1_step_y input1_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] input1_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] input1_step_z input1_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] input1_offset_first_element_in_bytes The offset of the first element in the source tensor - * - * @param[in] input2_ptr Pointer to the source tensor. Supported data types: QASYMM8 - * @param[in] input2_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] input2_step_x input2_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] input2_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] input2_step_y input2_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] input2_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] input2_step_z input2_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] input2_offset_first_element_in_bytes The offset of the first element in the source tensor - * - * @param[out] output_ptr Pointer to the destination tensor. 
Supported data types: QASYMM8 - * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor - */ -__kernel void notequal_quantized( - TENSOR3D_DECLARATION(in1), - TENSOR3D_DECLARATION(in2), - TENSOR3D_DECLARATION(out)) -{ - // Get pixels pointer - Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1); - Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2); - Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out); - - int16 in_a = CONVERT(vload16(0, (__global uchar *)in1.ptr), int16); - int16 in_b = CONVERT(vload16(0, (__global uchar *)in2.ptr), int16); - - in_a = SUB(in_a, (int16)((int)OFFSET_IN1)); - in_b = SUB(in_b, (int16)((int)OFFSET_IN2)); - - const float16 in1f32 = convert_float16(in_a) * (float16)((float)SCALE_IN1); - const float16 in2f32 = convert_float16(in_b) * (float16)((float)SCALE_IN2); - const float16 qresf32 = convert_float16((in1f32 != in2f32) ? 1 : 0); - const uchar16 res = convert_uchar16_sat(convert_int16_rte(qresf32)); - // Store result - vstore16(res, 0, (__global uchar *)out.ptr); -} diff --git a/libs/ARMComputeEx/src/core/CL/kernels/CLEqualKernel.cpp b/libs/ARMComputeEx/src/core/CL/kernels/CLEqualKernel.cpp deleted file mode 100644 index 39c74cb..0000000 --- a/libs/ARMComputeEx/src/core/CL/kernels/CLEqualKernel.cpp +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "arm_compute/core/CL/kernels/CLEqualKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibraryEx.h" -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -namespace -{ -constexpr unsigned int num_elems_processed_per_iteration = 16; - -Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, - const ITensorInfo *output) -{ - const TensorShape &out_shape = - TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape()); - - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, - DataType::QS16, DataType::S16, DataType::F16, - DataType::F32, DataType::QASYMM8); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, - DataType::QS16, DataType::S16, DataType::F16, - DataType::F32, DataType::QASYMM8); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, - "Inputs are not broadcast compatible"); - // Validate in case of configured output - if (output->total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8); - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), - "Wrong shape for output"); - } - return Status{}; -} -} // namespace - -CLEqualKernel::CLEqualKernel() : _input1(nullptr), _input2(nullptr), _output(nullptr) {} - -void CLEqualKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output) -{ - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info())); - - _input1 = input1; - _input2 = input2; - _output = output; - - // Create kernel - std::string kernel_name = "equal"; - std::set build_opts; - build_opts.emplace(("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input1->info()->data_type()))); - build_opts.emplace( - ("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()))); - build_opts.emplace( - ("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration))); - - if (is_data_type_quantized_asymmetric(input1->info()->data_type())) - { - build_opts.emplace("-DOFFSET_IN1=" + - support::cpp11::to_string(input1->info()->quantization_info().offset)); - build_opts.emplace("-DOFFSET_IN2=" + - support::cpp11::to_string(input2->info()->quantization_info().offset)); - build_opts.emplace("-DSCALE_IN1=" + - support::cpp11::to_string(input1->info()->quantization_info().scale)); - build_opts.emplace("-DSCALE_IN2=" + - support::cpp11::to_string(input2->info()->quantization_info().scale)); - kernel_name += "_quantized"; - } - - _kernel = - static_cast(CLKernelLibraryEx::get().create_kernel(kernel_name, build_opts)); - - const std::pair broadcast_pair = - ITensorInfo::broadcast_shape_and_valid_region(*input1->info(), *input2->info()); - - const TensorShape &out_shape = broadcast_pair.first; - const ValidRegion &valid_region = broadcast_pair.second; - - // Auto initialize output if not initialized - { - set_shape_if_empty(*output->info(), out_shape); - - if (input1->info()->data_type() == DataType::S16 || - input2->info()->data_type() == DataType::S16) - { - set_format_if_unknown(*output->info(), Format::S16); - } - else if (input1->info()->data_type() == DataType::F16 && - input2->info()->data_type() == DataType::F16) - { - set_format_if_unknown(*output->info(), Format::F16); - } - else if (input1->info()->data_type() == DataType::F32 
|| - input2->info()->data_type() == DataType::F32) - { - set_format_if_unknown(*output->info(), Format::F32); - } - } - - Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration)); - Window win_input1 = win.broadcast_if_dimension_le_one(*input1->info()); - Window win_input2 = win.broadcast_if_dimension_le_one(*input2->info()); - - AccessWindowHorizontal input1_access(input1->info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal input2_access(input2->info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration); - - bool window_changed = update_window_and_padding(win_input1, input1_access) || - update_window_and_padding(win_input2, input2_access) || - update_window_and_padding(win, output_access); - - output_access.set_valid_region(win, valid_region); - - ICLKernel::configure(win); -} - -void CLEqualKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - - const TensorShape &in_shape1 = _input1->info()->tensor_shape(); - const TensorShape &in_shape2 = _input2->info()->tensor_shape(); - const TensorShape &out_shape = _output->info()->tensor_shape(); - - bool can_collapse = true; - if (std::min(in_shape1.total_size(), in_shape2.total_size()) > 1) - { - can_collapse = - (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ); - for (size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++) - { - can_collapse = (in_shape1[d] == in_shape2[d]); - } - } - - bool has_collapsed = false; - Window collapsed = - can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) - : window; - - const TensorShape &in_shape1_collapsed = - has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1; - const TensorShape &in_shape2_collapsed = - has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2; - - Window slice = collapsed.first_slice_window_3D(); - Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed); - Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed); - - do - { - unsigned int idx = 0; - add_3D_tensor_argument(idx, _input1, slice_input1); - add_3D_tensor_argument(idx, _input2, slice_input2); - add_3D_tensor_argument(idx, _output, slice); - - enqueue(queue, *this, slice); - - collapsed.slide_window_slice_3D(slice_input1); - collapsed.slide_window_slice_3D(slice_input2); - } while (collapsed.slide_window_slice_3D(slice)); -} - -BorderSize CLEqualKernel::border_size() const -{ - const unsigned int replicateSize = - _output->info()->dimension(0) - - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0)); - const unsigned int border = - std::min(num_elems_processed_per_iteration - 1U, replicateSize); - return BorderSize(0, border, 0, 0); -} diff --git a/libs/ARMComputeEx/src/core/CL/kernels/CLNotEqualKernel.cpp b/libs/ARMComputeEx/src/core/CL/kernels/CLNotEqualKernel.cpp deleted file mode 100644 index c662616..0000000 --- a/libs/ARMComputeEx/src/core/CL/kernels/CLNotEqualKernel.cpp +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "arm_compute/core/CL/kernels/CLNotEqualKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibraryEx.h" -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -namespace -{ -constexpr unsigned int num_elems_processed_per_iteration = 16; - -Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, - const ITensorInfo *output) -{ - const TensorShape &out_shape = - TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape()); - - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN( - input1, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32, DataType::QASYMM8); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN( - input2, 1, DataType::U8, DataType::S16, DataType::F16, DataType::F32, DataType::QASYMM8); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, - "Inputs are not broadcast compatible"); - // Validate in case of configured output - if (output->total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8); - ARM_COMPUTE_RETURN_ERROR_ON_MSG( - detail::have_different_dimensions(out_shape, output->tensor_shape(), 0), - "Wrong shape for output"); - } - return Status{}; -} -} // namespace - -CLNotEqualKernel::CLNotEqualKernel() : _input1(nullptr), _input2(nullptr), _output(nullptr) {} - -void CLNotEqualKernel::configure(const ICLTensor *input1, const ICLTensor *input2, - ICLTensor *output) -{ - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info())); - - _input1 = input1; - _input2 = input2; - _output = output; - - // Create kernel - std::string kernel_name = "notequal"; - std::set build_opts; - build_opts.emplace(("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input1->info()->data_type()))); - build_opts.emplace( - ("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()))); - build_opts.emplace( - ("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration))); - - if (is_data_type_quantized_asymmetric(input1->info()->data_type())) - { - build_opts.emplace("-DOFFSET_IN1=" + - support::cpp11::to_string(input1->info()->quantization_info().offset)); - build_opts.emplace("-DOFFSET_IN2=" + - support::cpp11::to_string(input2->info()->quantization_info().offset)); - build_opts.emplace("-DSCALE_IN1=" + - support::cpp11::to_string(input1->info()->quantization_info().scale)); - build_opts.emplace("-DSCALE_IN2=" + - support::cpp11::to_string(input2->info()->quantization_info().scale)); - kernel_name += "_quantized"; - } - - _kernel = - static_cast(CLKernelLibraryEx::get().create_kernel(kernel_name, build_opts)); - - const std::pair broadcast_pair = - ITensorInfo::broadcast_shape_and_valid_region(*input1->info(), *input2->info()); - - const TensorShape &out_shape = broadcast_pair.first; - const ValidRegion &valid_region = broadcast_pair.second; - - // Auto initialize output if not initialized - { - set_shape_if_empty(*output->info(), out_shape); - - if 
(input1->info()->data_type() == DataType::S16 || - input2->info()->data_type() == DataType::S16) - { - set_format_if_unknown(*output->info(), Format::S16); - } - else if (input1->info()->data_type() == DataType::F16 && - input2->info()->data_type() == DataType::F16) - { - set_format_if_unknown(*output->info(), Format::F16); - } - else if (input1->info()->data_type() == DataType::F32 || - input2->info()->data_type() == DataType::F32) - { - set_format_if_unknown(*output->info(), Format::F32); - } - } - - Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration)); - Window win_input1 = win.broadcast_if_dimension_le_one(*input1->info()); - Window win_input2 = win.broadcast_if_dimension_le_one(*input2->info()); - - AccessWindowHorizontal input1_access(input1->info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal input2_access(input2->info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration); - - bool window_changed = update_window_and_padding(win_input1, input1_access) || - update_window_and_padding(win_input2, input2_access) || - update_window_and_padding(win, output_access); - - output_access.set_valid_region(win, valid_region); - - ICLKernel::configure(win); -} - -void CLNotEqualKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - - const TensorShape &in_shape1 = _input1->info()->tensor_shape(); - const TensorShape &in_shape2 = _input2->info()->tensor_shape(); - const TensorShape &out_shape = _output->info()->tensor_shape(); - - bool can_collapse = true; - if (std::min(in_shape1.total_size(), in_shape2.total_size()) > 1) - { - can_collapse = - (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ); - for (size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++) - { - can_collapse = (in_shape1[d] == in_shape2[d]); - } - } - - bool has_collapsed = false; - Window collapsed = - can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) - : window; - - const TensorShape &in_shape1_collapsed = - has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1; - const TensorShape &in_shape2_collapsed = - has_collapsed ? 
in_shape2.collapsed_from(Window::DimZ) : in_shape2; - - Window slice = collapsed.first_slice_window_3D(); - Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed); - Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed); - - do - { - unsigned int idx = 0; - add_3D_tensor_argument(idx, _input1, slice_input1); - add_3D_tensor_argument(idx, _input2, slice_input2); - add_3D_tensor_argument(idx, _output, slice); - - enqueue(queue, *this, slice); - - collapsed.slide_window_slice_3D(slice_input1); - collapsed.slide_window_slice_3D(slice_input2); - } while (collapsed.slide_window_slice_3D(slice)); -} - -BorderSize CLNotEqualKernel::border_size() const -{ - const unsigned int replicateSize = - _output->info()->dimension(0) - - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0)); - const unsigned int border = - std::min(num_elems_processed_per_iteration - 1U, replicateSize); - return BorderSize(0, border, 0, 0); -} diff --git a/libs/ARMComputeEx/src/runtime/CL/functions/CLEqual.cpp b/libs/ARMComputeEx/src/runtime/CL/functions/CLEqual.cpp deleted file mode 100644 index 15f7e16..0000000 --- a/libs/ARMComputeEx/src/runtime/CL/functions/CLEqual.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "arm_compute/runtime/CL/functions/CLEqual.h" - -#include "arm_compute/core/CL/kernels/CLEqualKernel.h" -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -void CLEqual::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input1, input2, output); - _kernel = std::move(k); - - if (output->info()->dimension(0) > 1) - { - ICLTensor *broadcasted_info = (input1->info()->dimension(0) == 1) ? input1 : input2; - - if (broadcasted_info->info()->dimension(0) == 1) - { - _border_handler.configure(broadcasted_info, _kernel->border_size(), BorderMode::REPLICATE); - } - } -} diff --git a/libs/ARMComputeEx/src/runtime/CL/functions/CLNotEqual.cpp b/libs/ARMComputeEx/src/runtime/CL/functions/CLNotEqual.cpp deleted file mode 100644 index d447dc6..0000000 --- a/libs/ARMComputeEx/src/runtime/CL/functions/CLNotEqual.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * Copyright (c) 2016-2018 ARM Limited. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "arm_compute/runtime/CL/functions/CLNotEqual.h" - -#include "arm_compute/core/CL/kernels/CLNotEqualKernel.h" - -#include "arm_compute/core/CL/ICLTensor.h" - -using namespace arm_compute; - -void CLNotEqual::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output) -{ - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input1, input2, output); - _kernel = std::move(k); - - if (output->info()->dimension(0) > 1) - { - ICLTensor *broadcasted_info = (input1->info()->dimension(0) == 1) ? input1 : input2; - - if (broadcasted_info->info()->dimension(0) == 1) - { - _border_handler.configure(broadcasted_info, _kernel->border_size(), BorderMode::REPLICATE); - } - } -} -- 2.7.4
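Note for reviewers: call sites that previously used the removed CLEqual / CLNotEqual
functions are expected to go through the unified CLComparisonOp instead. The sketch
below is illustrative only; the header path, the extra configure() argument and the
EQUAL / NOT_EQUAL enumerators are assumptions about the merged CLComparisonOp API
(they are not defined by this patch), so please check them against the actual header.

  // Illustrative migration sketch -- the CLComparisonOp header path, the extra
  // configure() argument and the ComparisonOperation enumerators are assumed.
  #include "arm_compute/runtime/CL/CLTensor.h"
  #include "arm_compute/runtime/CL/functions/CLComparisonOp.h" // assumed path

  using namespace arm_compute;

  void configure_comparisons(CLTensor &lhs, CLTensor &rhs, CLTensor &out_eq, CLTensor &out_ne)
  {
    // Before this patch: CLEqual eq; eq.configure(&lhs, &rhs, &out_eq);
    CLComparisonOp eq;
    eq.configure(&lhs, &rhs, &out_eq, ComparisonOperation::EQUAL); // assumed enum value

    // Before this patch: CLNotEqual ne; ne.configure(&lhs, &rhs, &out_ne);
    CLComparisonOp ne;
    ne.configure(&lhs, &rhs, &out_ne, ComparisonOperation::NOT_EQUAL); // assumed enum value
  }

As with the removed functions, the outputs are expected to be U8 tensors holding the
element-wise truth values over the broadcast shape of the two inputs.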