Propose unified kernel for Comparison operators and bug fix (#3914)
authorPrasanna R/SNAP /SRI-Bangalore/Engineer/삼성전자 <prasanna.r@samsung.com>
Fri, 14 Dec 2018 00:58:59 +0000 (06:28 +0530)
committer오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Fri, 14 Dec 2018 00:58:59 +0000 (09:58 +0900)
This patch proposes a unified CL kernel for comparison operators
This patch includes:
- CL kernel for EQUAL/NOT_EQUAL
- TRUTH value bug fix
- CL Kernel that can be easily extended for GT, LT, GE, LE

Signed-off-by: prasannar <prasanna.r@samsung.com>
libs/ARMComputeEx/arm_compute/core/CL/kernels/CLComparisonOpKernel.h [new file with mode: 0644]
libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLComparisonOp.h [new file with mode: 0644]
libs/ARMComputeEx/src/core/CL/CLKernelLibrary.cpp
libs/ARMComputeEx/src/core/CL/cl_kernels/comparison_op.cl [new file with mode: 0644]
libs/ARMComputeEx/src/core/CL/cl_kernels/comparison_op_quantized.cl [new file with mode: 0644]
libs/ARMComputeEx/src/core/CL/kernels/CLComparisonOpKernel.cpp [new file with mode: 0644]
libs/ARMComputeEx/src/runtime/CL/functions/CLComparisonOp.cpp [new file with mode: 0644]

diff --git a/libs/ARMComputeEx/arm_compute/core/CL/kernels/CLComparisonOpKernel.h b/libs/ARMComputeEx/arm_compute/core/CL/kernels/CLComparisonOpKernel.h
new file mode 100644 (file)
index 0000000..71a4cc3
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2016-2018 ARM Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __ARM_COMPUTE_CLCOMPARISON_OP_KERNEL_H__
+#define __ARM_COMPUTE_CLCOMPARISON_OP_KERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/TypesEx.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** OpenCL kernel computing the element-wise truth value of a comparison
+ *  (e.g. EQUAL / NOT_EQUAL) between two tensors. */
+class CLComparisonOpKernel : public ICLKernel
+{
+public:
+  /** Default constructor */
+  CLComparisonOpKernel();
+  /** Prevent instances of this class from being copied (As this class contains pointers). */
+  CLComparisonOpKernel(const CLComparisonOpKernel &) = delete;
+  /** Prevent instances of this class from being copied (As this class contains pointers). */
+  CLComparisonOpKernel &operator=(const CLComparisonOpKernel &) = delete;
+  /** Allow instances of this class to be moved */
+  CLComparisonOpKernel(CLComparisonOpKernel &&) = default;
+  /** Allow instances of this class to be moved */
+  CLComparisonOpKernel &operator=(CLComparisonOpKernel &&) = default;
+  /** Initialize the kernel's inputs and output.
+   *
+   * @param[in]  input1  First source tensor.
+   * @param[in]  input2  Second source tensor.
+   * @param[out] output  Output tensor holding the truth value of the comparison.
+   * @param[in]  op      Comparison operation to perform (EQUAL or NOT_EQUAL).
+   */
+  void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output,
+                 const ComparisonOperation &op);
+
+  // Inherited methods overridden:
+  void run(const Window &window, cl::CommandQueue &queue) override;
+
+  BorderSize border_size() const override;
+
+private:
+  const ICLTensor *_input1; // First input tensor (not owned)
+  const ICLTensor *_input2; // Second input tensor (not owned)
+  ICLTensor *_output;       // Output tensor (not owned)
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLCOMPARISON_OP_KERNEL_H__ */
diff --git a/libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLComparisonOp.h b/libs/ARMComputeEx/arm_compute/runtime/CL/functions/CLComparisonOp.h
new file mode 100644 (file)
index 0000000..1df7064
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2016-2018 ARM Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __ARM_COMPUTE_CLCOMPARISON_OP_H__
+#define __ARM_COMPUTE_CLCOMPARISON_OP_H__
+
+#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
+#include "arm_compute/core/TypesEx.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Basic function running the comparison-operator CL kernel. */
+class CLComparisonOp : public ICLSimpleFunction
+{
+public:
+  /** Initialise the function's source and destination.
+   *
+   * @param[in]  input1  Source tensor1. Data types supported:
+   * U8/QASYMM8/U16/S16/F16/S32/F32 (see the kernel's validate_arguments).
+   * @param[in]  input2  Source tensor2. Data types supported:
+   * U8/QASYMM8/U16/S16/F16/S32/F32 (see the kernel's validate_arguments).
+   * @param[out] output Output tensor holding the truth value. Data type supported: QASYMM8.
+   * @param[in]  op     Comparison operation to perform (EQUAL or NOT_EQUAL).
+   *
+   */
+  void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output,
+                 const ComparisonOperation &op);
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLCOMPARISON_OP_H__ */
index 83c3a6b..fdca0e6 100644 (file)
@@ -74,6 +74,8 @@ const std::map<std::string, std::string> CLKernelLibraryEx::_kernel_program_map
     {"channel_extract_YUYV422", "channel_extract.cl"},
     {"combine_gradients_L1", "canny.cl"},
     {"combine_gradients_L2", "canny.cl"},
+    {"comparison_op", "comparison_op.cl"},
+    {"comparison_op_quantized", "comparison_op_quantized.cl"},
     {"concatenate_depth", "concatenate.cl"},
     {"concatenate_width", "concatenate.cl"},
     {"convolution_rectangle", "convolution_rectangle.cl"},
@@ -341,6 +343,14 @@ const std::map<std::string, std::string> CLKernelLibraryEx::_program_source_map
 #include "./cl_kernels/cast.clembed"
     },
     {
+        "comparison_op.cl",
+#include "./cl_kernels/comparison_op.clembed"
+    },
+    {
+        "comparison_op_quantized.cl",
+#include "./cl_kernels/comparison_op_quantized.clembed"
+    },
+    {
         "embedding_lookup.cl",
 #include "./cl_kernels/embedding_lookup.clembed"
     },
diff --git a/libs/ARMComputeEx/src/core/CL/cl_kernels/comparison_op.cl b/libs/ARMComputeEx/src/core/CL/cl_kernels/comparison_op.cl
new file mode 100644 (file)
index 0000000..1664e68
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2016-2018 ARM Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "helpers.h"
+
+/** Returns the truth value of comparison operators, element-wise.
+ * Comparison operators may be equal, not_equal etc.
+ *
+ * @attention The input and output data types need to be passed at compile time using -DDATA_TYPE_IN, -DDATA_TYPE_OUT,
+ * e.g. -DDATA_TYPE_IN=float, -DDATA_TYPE_OUT=uchar
+ * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @attention The comparison operation is selected at compile time using -DOP_CODE=code,
+ *            e.g. -DOP_CODE=0 (EQUAL), -DOP_CODE=1 (NOT_EQUAL)
+ *
+ * @param[in]  input1_ptr                            Pointer to the source tensor. Supported data types: U8/S8/U16/S16/F16/U32/S32/F32
+ * @param[in]  input1_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  input1_step_x                         input1_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  input1_stride_y                       Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  input1_step_y                         input1_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  input1_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  input1_step_z                         input1_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  input1_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ *
+ * @param[in]  input2_ptr                            Pointer to the source tensor. Supported data types: U8/S8/U16/S16/F16/U32/S32/F32
+ * @param[in]  input2_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  input2_step_x                         input2_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  input2_stride_y                       Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  input2_step_y                         input2_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  input2_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  input2_step_z                         input2_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  input2_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ *
+ * @param[out] output_ptr                           Pointer to the destination tensor. Supported data types: QASYMM8
+ * @param[in]  output_stride_x                      Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  output_step_x                        output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  output_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  output_step_y                        output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  output_step_z                        output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  output_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void comparison_op(
+    TENSOR3D_DECLARATION(input1),
+    TENSOR3D_DECLARATION(input2),
+    TENSOR3D_DECLARATION(output))
+{
+    Tensor3D input1 = CONVERT_TO_TENSOR3D_STRUCT(input1);
+    Tensor3D input2 = CONVERT_TO_TENSOR3D_STRUCT(input2);
+    Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+    // NOTE(review): OpenCL vector relational operators yield -1 (all bits set)
+    // for true; CONVERT to an unsigned output type therefore stores 255, not 1.
+    // Confirm consumers expect this truth-value encoding.
+#if OP_CODE == 0 //EQUAL
+    VSTORE(VEC_SIZE)
+    (CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input1.ptr) == VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input2.ptr),
+             VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)),
+     0, (__global DATA_TYPE_OUT *)output.ptr);
+#elif OP_CODE == 1 //NOT_EQUAL
+    VSTORE(VEC_SIZE)
+    (CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input1.ptr) != VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input2.ptr),
+             VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)),
+     0, (__global DATA_TYPE_OUT *)output.ptr);
+#endif
+}
diff --git a/libs/ARMComputeEx/src/core/CL/cl_kernels/comparison_op_quantized.cl b/libs/ARMComputeEx/src/core/CL/cl_kernels/comparison_op_quantized.cl
new file mode 100644 (file)
index 0000000..133fcee
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2016-2018 ARM Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "helpers.h"
+#define SUB(x, y) (x) - (y)
+
+/** Returns the truth value of a comparison (EQUAL / NOT_EQUAL) on quantized inputs.
+ * Inputs are dequantized with their respective offset/scale before comparing.
+ *
+ * @attention Offset and Scale of both input should be given as a preprocessor argument using -DOFFSET_IN1=int, -DOFFSET_IN2=int, -DSCALE_IN1=float and -DSCALE_IN2=float. e.g. -DOFFSET_IN1=1, -DOFFSET_IN2=0, -DSCALE_IN1=0.5, -DSCALE_IN2=0.5
+ * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @attention The comparison operation is selected at compile time using -DOP_CODE=code,
+ *            e.g. -DOP_CODE=0 (EQUAL), -DOP_CODE=1 (NOT_EQUAL)
+ *
+ * @param[in]  input1_ptr                            Pointer to the source tensor. Supported data types: QASYMM8
+ * @param[in]  input1_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  input1_step_x                         input1_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  input1_stride_y                       Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  input1_step_y                         input1_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  input1_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  input1_step_z                         input1_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  input1_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ *
+ * @param[in]  input2_ptr                            Pointer to the source tensor. Supported data types: QASYMM8
+ * @param[in]  input2_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  input2_step_x                         input2_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  input2_stride_y                       Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  input2_step_y                         input2_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  input2_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  input2_step_z                         input2_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  input2_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ *
+ * @param[out] output_ptr                           Pointer to the destination tensor. Supported data types: QASYMM8
+ * @param[in]  output_stride_x                      Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  output_step_x                        output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  output_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  output_step_y                        output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  output_step_z                        output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void comparison_op_quantized(
+    TENSOR3D_DECLARATION(in1),
+    TENSOR3D_DECLARATION(in2),
+    TENSOR3D_DECLARATION(out))
+{
+    // Get pixels pointer
+    Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1);
+    Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2);
+    Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+
+    // Widen to int so the offset subtraction cannot wrap around.
+    int16 in_a = CONVERT(vload16(0, (__global uchar *)in1.ptr), int16);
+    int16 in_b = CONVERT(vload16(0, (__global uchar *)in2.ptr), int16);
+
+    in_a = SUB(in_a, (int16)((int)OFFSET_IN1));
+    in_b = SUB(in_b, (int16)((int)OFFSET_IN2));
+
+    // Dequantize: (q - offset) * scale, using each input's own parameters.
+    const float16 in1f32  = convert_float16(in_a) * (float16)((float)SCALE_IN1);
+    const float16 in2f32  = convert_float16(in_b) * (float16)((float)SCALE_IN2);
+
+    // BUGFIX: the host passes -DOP_CODE=<n> (see CLComparisonOpKernel::configure).
+    // The previous check on the undefined macro OPCODE evaluated to 0 in #if,
+    // so the EQUAL branch was always compiled - even for NOT_EQUAL.
+#if OP_CODE == 0 //EQUAL QUANTIZED
+    vstore16(CONVERT(in1f32 == in2f32, VEC_DATA_TYPE(DATA_TYPE_OUT, 16)), 0, (__global uchar *)out.ptr);
+#elif OP_CODE == 1 //NOT EQUAL QUANTIZED
+    vstore16(CONVERT(in1f32 != in2f32, VEC_DATA_TYPE(DATA_TYPE_OUT, 16)), 0, (__global uchar *)out.ptr);
+#endif
+}
diff --git a/libs/ARMComputeEx/src/core/CL/kernels/CLComparisonOpKernel.cpp b/libs/ARMComputeEx/src/core/CL/kernels/CLComparisonOpKernel.cpp
new file mode 100644 (file)
index 0000000..134c237
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2016-2018 ARM Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "arm_compute/core/CL/kernels/CLComparisonOpKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibraryEx.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+
+using namespace arm_compute;
+
+namespace
+{
+constexpr unsigned int num_elems_processed_per_iteration = 16;
+
+// Validates input/output tensor infos before configuration: checks supported
+// data types, broadcast compatibility of the two input shapes, and (when the
+// output is already initialized) that it is QASYMM8 with the broadcast shape.
+Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2,
+                          const ITensorInfo *output)
+{
+  // Empty broadcast shape signals incompatible input shapes.
+  const TensorShape &out_shape =
+      TensorShape::broadcast_shape(input1->tensor_shape(), input2->tensor_shape());
+
+  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::U16,
+                                                       DataType::S16, DataType::F16, DataType::S32,
+                                                       DataType::F32, DataType::QASYMM8);
+  ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::U16,
+                                                       DataType::S16, DataType::F16, DataType::S32,
+                                                       DataType::F32, DataType::QASYMM8);
+
+  ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0,
+                                  "Inputs are not broadcast compatible");
+  // Validate in case of configured output
+  if (output->total_size() > 0)
+  {
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+        detail::have_different_dimensions(out_shape, output->tensor_shape(), 0),
+        "Wrong shape for output");
+  }
+  return Status{};
+}
+} // namespace
+
+// Members start null; the tensors are bound in configure().
+CLComparisonOpKernel::CLComparisonOpKernel() : _input1(nullptr), _input2(nullptr), _output(nullptr)
+{
+}
+
+void CLComparisonOpKernel::configure(const ICLTensor *input1, const ICLTensor *input2,
+                                     ICLTensor *output, const ComparisonOperation &op)
+{
+  ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2);
+  ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info()));
+
+  _input1 = input1;
+  _input2 = input2;
+  _output = output;
+
+  // Create kernel
+  std::string kernel_name = "comparison_op";
+  int op_code;
+
+  switch (op)
+  {
+    case ComparisonOperation::EQUAL:
+      op_code = 0;
+      break;
+    case ComparisonOperation::NOT_EQUAL:
+      op_code = 1;
+      break;
+  }
+
+  std::set<std::string> build_opts;
+  build_opts.emplace(("-DOP_CODE=" + support::cpp11::to_string(op_code)));
+  build_opts.emplace(("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input1->info()->data_type())));
+  build_opts.emplace(
+      ("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type())));
+  build_opts.emplace(
+      ("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+
+  if (is_data_type_quantized_asymmetric(input1->info()->data_type()) &&
+      ((input1->info()->quantization_info().offset != input2->info()->quantization_info().offset) ||
+       (input1->info()->quantization_info().scale != input2->info()->quantization_info().scale)))
+  {
+    build_opts.emplace("-DOFFSET_IN1=" +
+                       support::cpp11::to_string(input1->info()->quantization_info().offset));
+    build_opts.emplace("-DOFFSET_IN2=" +
+                       support::cpp11::to_string(input2->info()->quantization_info().offset));
+    build_opts.emplace("-DSCALE_IN1=" +
+                       support::cpp11::to_string(input1->info()->quantization_info().scale));
+    build_opts.emplace("-DSCALE_IN2=" +
+                       support::cpp11::to_string(input2->info()->quantization_info().scale));
+    kernel_name += "_quantized";
+  }
+
+  _kernel =
+      static_cast<cl::Kernel>(CLKernelLibraryEx::get().create_kernel(kernel_name, build_opts));
+
+  const std::pair<TensorShape, ValidRegion> broadcast_pair =
+      ITensorInfo::broadcast_shape_and_valid_region(*input1->info(), *input2->info());
+
+  const TensorShape &out_shape = broadcast_pair.first;
+  const ValidRegion &valid_region = broadcast_pair.second;
+
+  // Auto initialize output if not initialized
+  {
+    set_shape_if_empty(*output->info(), out_shape);
+
+    if (input1->info()->data_type() == DataType::S16 ||
+        input2->info()->data_type() == DataType::S16)
+    {
+      set_format_if_unknown(*output->info(), Format::S16);
+    }
+    else if (input1->info()->data_type() == DataType::F16 &&
+             input2->info()->data_type() == DataType::F16)
+    {
+      set_format_if_unknown(*output->info(), Format::F16);
+    }
+    else if (input1->info()->data_type() == DataType::F32 ||
+             input2->info()->data_type() == DataType::F32)
+    {
+      set_format_if_unknown(*output->info(), Format::F32);
+    }
+  }
+
+  Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
+  Window win_input1 = win.broadcast_if_dimension_le_one(*input1->info());
+  Window win_input2 = win.broadcast_if_dimension_le_one(*input2->info());
+
+  AccessWindowHorizontal input1_access(input1->info(), 0, num_elems_processed_per_iteration);
+  AccessWindowHorizontal input2_access(input2->info(), 0, num_elems_processed_per_iteration);
+  AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
+
+  bool window_changed = update_window_and_padding(win_input1, input1_access) ||
+                        update_window_and_padding(win_input2, input2_access) ||
+                        update_window_and_padding(win, output_access);
+
+  output_access.set_valid_region(win, valid_region);
+
+  ICLKernel::configure(win);
+}
+
+// Enqueues the kernel slice by slice, collapsing dimensions above Z when both
+// inputs agree on them and re-deriving broadcast windows for each input.
+void CLComparisonOpKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+  ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+  ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+  const TensorShape &in_shape1 = _input1->info()->tensor_shape();
+  const TensorShape &in_shape2 = _input2->info()->tensor_shape();
+  const TensorShape &out_shape = _output->info()->tensor_shape();
+
+  // Dimensions >= Z can only be collapsed when both inputs have identical
+  // extents there (i.e. no broadcasting above the 3D slice).
+  bool can_collapse = true;
+  if (std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
+  {
+    can_collapse =
+        (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
+    for (size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++)
+    {
+      can_collapse = (in_shape1[d] == in_shape2[d]);
+    }
+  }
+
+  bool has_collapsed = false;
+  Window collapsed =
+      can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed)
+                   : window;
+
+  // Keep the input shapes consistent with the (possibly) collapsed window.
+  const TensorShape &in_shape1_collapsed =
+      has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
+  const TensorShape &in_shape2_collapsed =
+      has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;
+
+  Window slice = collapsed.first_slice_window_3D();
+  Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
+  Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);
+
+  // Walk all 3D slices; the input slices are slid in lock-step with the
+  // output slice so broadcast inputs keep re-reading their single element.
+  do
+  {
+    unsigned int idx = 0;
+    add_3D_tensor_argument(idx, _input1, slice_input1);
+    add_3D_tensor_argument(idx, _input2, slice_input2);
+    add_3D_tensor_argument(idx, _output, slice);
+
+    enqueue(queue, *this, slice);
+
+    collapsed.slide_window_slice_3D(slice_input1);
+    collapsed.slide_window_slice_3D(slice_input2);
+  } while (collapsed.slide_window_slice_3D(slice));
+}
+
+// Right-hand replicate border needed when one input is broadcast along X:
+// the border is the X-size difference, capped at one vector width minus one.
+BorderSize CLComparisonOpKernel::border_size() const
+{
+  const unsigned int replicateSize =
+      _output->info()->dimension(0) -
+      std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
+  const unsigned int border =
+      std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
+  return BorderSize(0, border, 0, 0);
+}
diff --git a/libs/ARMComputeEx/src/runtime/CL/functions/CLComparisonOp.cpp b/libs/ARMComputeEx/src/runtime/CL/functions/CLComparisonOp.cpp
new file mode 100644 (file)
index 0000000..f6a745a
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2016-2018 ARM Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "arm_compute/runtime/CL/functions/CLComparisonOp.h"
+
+#include "arm_compute/core/CL/kernels/CLComparisonOpKernel.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+
+using namespace arm_compute;
+
+// Creates and configures the comparison kernel; if one input is broadcast
+// along X, a replicate border handler pads it so vector loads stay in bounds.
+void CLComparisonOp::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output,
+                               const ComparisonOperation &op)
+{
+  auto k = arm_compute::support::cpp14::make_unique<CLComparisonOpKernel>();
+  k->configure(input1, input2, output, op);
+  _kernel = std::move(k);
+
+  if (output->info()->dimension(0) > 1)
+  {
+    // Pick whichever input is broadcast (size 1) along X, if any.
+    ICLTensor *broadcasted_info = (input1->info()->dimension(0) == 1) ? input1 : input2;
+
+    // The re-check guards the case where neither input is broadcast along X.
+    if (broadcasted_info->info()->dimension(0) == 1)
+    {
+      _border_handler.configure(broadcasted_info, _kernel->border_size(), BorderMode::REPLICATE);
+    }
+  }
+}