COMPMID-3979 Sanitise Padding Removal epic
author    SiCong Li <sicong.li@arm.com>
Tue, 17 Nov 2020 14:09:01 +0000 (14:09 +0000)
committer SiCong Li <sicong.li@arm.com>
Tue, 17 Nov 2020 16:38:52 +0000 (16:38 +0000)
* Add missing padding immutability asserts in all relevant CL kernels.
* Remove unnecessary zero padding validation tests.
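
The assert pattern applied in each kernel's configure() is, in sketch form
(a minimal illustration distilled from the hunks below; CLMyKernel is a
hypothetical kernel name and all unrelated configuration steps are elided):

    void CLMyKernel::configure(const CLCompileContext &compile_context, ITensorInfo *input, ITensorInfo *output)
    {
        // Snapshot the padding of every tensor involved, before configuration
        auto padding_info = get_padding_info({ input, output });

        // ... window calculation, build options, kernel creation ...

        // Fail if configuration silently (re-)introduced padding on any of them
        ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
    }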

Change-Id: If93f9ccbc988e0286f5e7b135f812141476d5da0
Signed-off-by: SiCong Li <sicong.li@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4446
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
27 files changed:
src/core/CL/cl_kernels/activation_layer.cl
src/core/CL/cl_kernels/activation_layer_quant.cl
src/core/CL/kernels/CLActivationLayerKernel.cpp
src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
src/core/CL/kernels/CLElementwiseOperationKernel.cpp
src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
src/core/CL/kernels/CLIm2ColKernel.cpp
src/core/CL/kernels/CLPoolingLayerKernel.cpp
src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
tests/validation/CL/ActivationLayer.cpp
tests/validation/CL/ArithmeticAddition.cpp
tests/validation/CL/BatchConcatenateLayer.cpp
tests/validation/CL/BatchNormalizationLayer.cpp
tests/validation/CL/GEMMLowp.cpp
tests/validation/CL/GEMMMatrixMultiplyNative.cpp
tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
tests/validation/CL/GEMMReshapeLHSMatrix.cpp
tests/validation/CL/Im2Col.cpp
tests/validation/CL/PoolingLayer.cpp
tests/validation/CL/Winograd.cpp

src/core/CL/cl_kernels/activation_layer.cl
index 174b75398d1db1b49d476a8d6a1c88db6cf21a7a..bc2c99b6c8781588e527428db73e227e2dce9e1e 100644 (file)
@@ -33,7 +33,7 @@
  *
  * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
  * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
  * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
  * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
  *
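
As a worked illustration of the corrected define (values assumed for the
example, not taken from this change): an input whose first dimension is 19,
compiled with -DVEC_SIZE=16, leaves 19 % 16 = 3 trailing elements, so the
kernel would be built with options along the lines of:

    -DDATA_TYPE=float -DVEC_SIZE=16 -DVEC_SIZE_LEFTOVER=3 -DACT=TANH -DA_VAL=1.0f -DB_VAL=1.0f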
src/core/CL/cl_kernels/activation_layer_quant.cl
index c031c86a5e92408025e28cd51a1bcaaaaff8051f..66261019ab8e64a2152a66952c3919ed049ca90a 100644 (file)
@@ -36,7 +36,7 @@
  *
  * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
  * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
  * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
  * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
  * @note Quantization offsets of the input/output tensors are passed in only if asymmetric with -DO1_VAL= and -DO2_VAL= respectively.
@@ -108,7 +108,7 @@ __kernel void activation_layer_quant_f32(
  *
  * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
  * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
  * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
  * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
  * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
src/core/CL/kernels/CLActivationLayerKernel.cpp
index 8ddf8d8f9e79cd7a4053967de26c7dd2db932271..9f9538cb7604d07ba80242b0f2ffebed96c6ff9b 100644 (file)
@@ -91,6 +91,8 @@ void CLActivationLayerKernel::configure(const CLCompileContext &compile_context,
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input);
 
+    auto padding_info = get_padding_info({ input, output });
+
     _run_in_place = (output == nullptr) || (output == input);
 
     if(output != nullptr)
@@ -207,6 +209,8 @@ void CLActivationLayerKernel::configure(const CLCompileContext &compile_context,
     _config_id += support::cpp11::to_string(input->dimension(0));
     _config_id += "_";
     _config_id += support::cpp11::to_string(input->dimension(1));
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 Status CLActivationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
src/core/CL/kernels/CLBatchConcatenateLayerKernel.cpp
index 7e9424f58b3e03af1b1ad81d1e40b135f918279f..ccd6a5a0fc5041f9fbfdc946b45e1f912754a159 100644 (file)
@@ -65,6 +65,8 @@ void CLBatchConcatenateLayerKernel::configure(const CLCompileContext &compile_co
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, batch_offset, output));
 
+    auto padding_info = get_padding_info({ input, output });
+
     _batch_offset = batch_offset;
 
     const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->element_size(), input->dimension(0));
@@ -109,6 +111,8 @@ void CLBatchConcatenateLayerKernel::configure(const CLCompileContext &compile_co
     _config_id += support::cpp11::to_string(input->dimension(2));
     _config_id += "_";
     _config_id += support::cpp11::to_string(input->dimension(3));
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 Status CLBatchConcatenateLayerKernel::validate(const arm_compute::ITensorInfo *input,
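
The adjust_vec_size() call in the hunk above chooses how many elements are
processed per iteration; a minimal sketch of the behaviour the surrounding
code relies on (an assumption, not the library's actual implementation: the
requested width is clamped to the tensor's first dimension so that no
right-hand padding is required):

    #include <algorithm>
    #include <cstddef>

    // Hypothetical stand-in for arm_compute's adjust_vec_size() helper
    unsigned int adjust_vec_size_sketch(unsigned int requested, std::size_t dim0)
    {
        // Never read past the end of the row: shrink the vector width if needed
        return std::min<unsigned int>(requested, static_cast<unsigned int>(dim0));
    }

    // e.g. an FP32 input (element_size() == 4) requests 16 / 4 = 4 elements, but
    // a tensor with dimension(0) == 3 is processed 3 elements at a time instead.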
src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 9aeca3bcfe9023d120801ca21ebbb4d1517965cb..44bdc6f5872fd63d3b52b5a079a5e2988b7b3e33 100644 (file)
@@ -123,13 +123,14 @@ void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
 
-    _input   = input;
-    _output  = output;
-    _mean    = mean;
-    _var     = var;
-    _beta    = beta;
-    _gamma   = gamma;
-    _epsilon = epsilon;
+    auto padding_info = get_padding_info({ input, output, mean, var, beta, gamma });
+    _input            = input;
+    _output           = output;
+    _mean             = mean;
+    _var              = var;
+    _beta             = beta;
+    _gamma            = gamma;
+    _epsilon          = epsilon;
 
     _run_in_place = (output == nullptr) || (output == input);
 
@@ -186,6 +187,8 @@ void CLBatchNormalizationLayerKernel::configure(const CLCompileContext &compile_
         ICLKernel::configure_internal(win_config.second);
     }
 
+    ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
+
     _config_id = "batch_normalization_layer_";
     _config_id += string_from_data_type(input->info()->data_type());
     _config_id += "_";
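
Note the guarded form of the assert used here (and, below, in the Im2Col,
Pooling and Winograd input-transform kernels): immutability is only checked
for NHWC, presumably because the NCHW paths of these kernels may still
legitimately configure padding:

    // Only the NHWC code path is expected to leave the padding untouched
    ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));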
src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
index 78adfd202fef49b71f0bd8340ee9a3a690e309b0..eb5bfc2d86a1ea880447ec1b585f260d0eb35c14 100644 (file)
@@ -64,6 +64,8 @@ void CLDepthConcatenateLayerKernel::configure(const CLCompileContext &compile_co
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, depth_offset, output));
 
+    auto padding_info = get_padding_info({ input, output });
+
     _depth_offset = depth_offset;
 
     const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->element_size(), input->dimension(0));
@@ -94,6 +96,8 @@ void CLDepthConcatenateLayerKernel::configure(const CLCompileContext &compile_co
 
     // Set output valid region
     output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 Status CLDepthConcatenateLayerKernel::validate(const arm_compute::ITensorInfo *input,
src/core/CL/kernels/CLElementwiseOperationKernel.cpp
index 896ee119c1e886cc00b8ff37219ed67cc7f22d5f..efb3fe79e3783662efe522404739d0fe4f23cfe6 100644 (file)
@@ -333,11 +333,13 @@ void CLSaturatedArithmeticOperationKernel::configure(const CLCompileContext &com
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
     ARM_COMPUTE_ERROR_THROW_ON(CLSaturatedArithmeticOperationKernel::validate(op, input1, input2, output, policy, act_info));
+    auto padding_info = get_padding_info({ input1, input2, output });
 
     _policy   = policy;
     _op       = op;
     _act_info = act_info;
     configure_common(compile_context, input1, input2, output);
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 Status CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy,
@@ -389,10 +391,12 @@ void CLArithmeticOperationKernel::configure(const CLCompileContext &compile_cont
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
     ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticOperationKernel::validate(op, input1, input2, output, act_info));
+    auto padding_info = get_padding_info({ input1, input2, output });
 
     _op       = op;
     _act_info = act_info;
     configure_common(compile_context, input1, input2, output);
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 Status CLArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
index d0f016879ec105fd9bc14bc7e778b60f668ccb4f..6a58d5e202409c68350d5e3324cf3951b3ebb220 100644 (file)
@@ -84,6 +84,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::configure(const CLCompi
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info));
 
+    auto padding_info = get_padding_info({ input, bias, output });
+
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(info->output_data_type));
 
@@ -116,6 +118,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::configure(const CLCompi
     // Configure kernel window
     auto win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
     ICLKernel::configure_internal(win);
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 void CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel::run(const Window &window, cl::CommandQueue &queue)
src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.cpp
index 1d29dfe4b3484f8c99f6b2e84ae985a852649579..a5888a5ded0128bf074bdfd71c33c60a7abd56bb 100644 (file)
@@ -94,6 +94,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const CLCompileCon
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr) ? bias->info() : nullptr, output->info(), info));
 
+    auto padding_info = get_padding_info({ input, bias, output });
+
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(info->output_data_type));
 
@@ -123,6 +125,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::configure(const CLCompileCon
     // Configure kernel window
     Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
     ICLKernel::configure_internal(win);
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 void CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::run(const Window &window, cl::CommandQueue &queue)
src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.cpp
index d32d328fc262ea802bff462cfe4f627b8dbb15cd..7d4352479cad1d49dd195bde67b5a5dfcfc5a8b9 100644 (file)
@@ -92,6 +92,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const CLCompileContext &c
                                                   output->info(),
                                                   output_stage));
 
+    auto padding_info = get_padding_info({ input, bias, output });
+
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_stage->output_data_type));
 
@@ -123,6 +125,8 @@ void CLGEMMLowpQuantizeDownInt32ScaleKernel::configure(const CLCompileContext &c
     // Configure kernel window
     Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
     ICLKernel::configure_internal(win);
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 void CLGEMMLowpQuantizeDownInt32ScaleKernel::run(const Window &window, cl::CommandQueue &queue)
src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.cpp
index f613937f54d6e26eb2195dc11052e27fca056d83..387f1a4ebca848d9aac56cbd02f672cfe4e0141d 100644 (file)
@@ -219,6 +219,7 @@ void CLGEMMMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile
 
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
 
+    auto padding_info         = get_padding_info({ input0, output });
     _input0                   = input0;
     _input1                   = input1;
     _input2                   = helpers::float_ops::is_zero(beta) ? nullptr : input2;
@@ -317,6 +318,8 @@ void CLGEMMMatrixMultiplyNativeKernel::configure(const CLCompileContext &compile
     _config_id += support::cpp11::to_string(rhs_info.n0);
     _config_id += "_";
     _config_id += support::cpp11::to_string(rhs_info.k0);
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 Status CLGEMMMatrixMultiplyNativeKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp
index fb15b42fe2b66ca0037f7a19a20aa277279d982c..23e18bac92a90fa674c6d48df34f648b54835ee0 100644 (file)
@@ -225,6 +225,7 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
 
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info));
 
+    auto padding_info         = get_padding_info({ input0, output });
     _input0                   = input0;
     _input1                   = input1;
     _input2                   = helpers::float_ops::is_zero(beta) ? nullptr : input2;
@@ -328,6 +329,8 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const CLCompileContext &compi
     _config_id += support::cpp11::to_string(lhs_info.interleave);
     _config_id += "_";
     _config_id += support::cpp11::to_string(rhs_info.interleave);
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 Status CLGEMMMatrixMultiplyReshapedKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta,
src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
index 3e2fc79704669a7021f4bc668f27fcba53d34982..52510075b7ebf42fb1a607a4cff93767eb1968d5 100644 (file)
@@ -125,6 +125,8 @@ void CLGEMMReshapeLHSMatrixKernel::configure(const CLCompileContext &compile_con
     // Perform validate step
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), lhs_info, reinterpret_input_as_3d));
 
+    auto padding_info = get_padding_info({ input });
+
     _input                   = input;
     _output                  = output;
     _reinterpret_input_as_3d = reinterpret_input_as_3d;
@@ -180,6 +182,8 @@ void CLGEMMReshapeLHSMatrixKernel::configure(const CLCompileContext &compile_con
     _config_id += support::cpp11::to_string(lhs_info.interleave);
     _config_id += "_";
     _config_id += support::cpp11::to_string(lhs_info.transpose);
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 Status CLGEMMReshapeLHSMatrixKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
src/core/CL/kernels/CLIm2ColKernel.cpp
index 0789cdc8a7d91ac4a56b42e66224f31974058a64..07309de83c67366a48e14879709483150b7b5e7e 100644 (file)
@@ -317,7 +317,8 @@ void CLIm2ColKernel::configure(const CLCompileContext &compile_context, const IC
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, dilation, num_groups));
 
-    _data_layout = input->info()->data_layout();
+    auto padding_info = get_padding_info({ input, output });
+    _data_layout      = input->info()->data_layout();
 
     const unsigned int width_idx    = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
     const unsigned int height_idx   = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
@@ -358,6 +359,8 @@ void CLIm2ColKernel::configure(const CLCompileContext &compile_context, const IC
     _config_id += support::cpp11::to_string(output->info()->dimension(1));
     _config_id += "_";
     _config_id += lower_string(string_from_data_layout(_data_layout));
+
+    ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
 }
 
 Status CLIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation,
src/core/CL/kernels/CLPoolingLayerKernel.cpp
index 905610c31fa35dc025278175b451ceb73dc84a06..79843cd2996ccd8c911c1b4c95dffe59d0073fdf 100644 (file)
@@ -216,6 +216,8 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
+    auto padding_info = get_padding_info({ input, output, indices });
+
     // Set instance variables
     _input                              = input;
     _output                             = output;
@@ -419,6 +421,8 @@ void CLPoolingLayerKernel::configure(const CLCompileContext &compile_context, co
     _config_id += support::cpp11::to_string(output->info()->dimension(idx_channel));
     _config_id += "_";
     _config_id += lower_string(string_from_data_layout(input->info()->data_layout()));
+
+    ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
 }
 
 Status CLPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
index 6f695c93db1aad2eee89b8f1bd74e787695d07ea..695e1cbbf16794db6543de5d9f3f369485b1da36 100644 (file)
@@ -115,6 +115,8 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
 
+    auto padding_info = get_padding_info({ input, output });
+
     const PadStrideInfo conv_info        = winograd_info.convolution_info;
     const Size2D        output_tile_size = winograd_info.output_tile_size;
     const Size2D        kernel_size      = winograd_info.kernel_size;
@@ -204,6 +206,8 @@ void CLWinogradInputTransformKernel::configure(const CLCompileContext &compile_c
     ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
     ICLKernel::configure_internal(win_config.second, cl::NDRange(1, 1, 8));
 
+    ARM_COMPUTE_ERROR_ON((input->info()->data_layout() == DataLayout::NHWC) && has_padding_changed(padding_info));
+
     _config_id = kernel_name;
     _config_id += support::cpp11::to_string(input->info()->dimension(0));
     _config_id += "_";
tests/validation/CL/ActivationLayer.cpp
index f776e334a07de7949b11a8274bd223b6ba86feff..9b725a44e791de9ad0b2cc75f8dab4abc52969a2 100644 (file)
@@ -90,28 +90,6 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
 /** Input data sets. */
 const auto ActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), datasets::ActivationFunctions()), framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
 
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, unsigned int height, unsigned int channels, unsigned int batches, const ActivationLayerInfo &act_info, DataType data_type)
-{
-    TensorShape shape(width, height, channels, batches);
-
-    // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, data_type);
-    CLTensor dst = create_tensor<CLTensor>(shape, data_type);
-
-    src.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-    dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
-    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    // Create and configure function
-    CLActivationLayer act;
-    act.configure(&src, &dst, act_info);
-
-    // Padding can be added along rhs and bias's X dimension
-    return src.info()->padding().empty() && dst.info()->padding().empty();
-}
 } // namespace
 
 TEST_SUITE(CL)
@@ -155,26 +133,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
 }
 
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- *     - First dimension multiple of 16
- *     - First dimension non-multiple of 16
- *     - First dimension less than 16 (vec_size for qasymm8) but multiple
- *     - First dimension less than 16 (vec_size for qasymm8) non-multiple
- *     - Tensor with only one element
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width",    { 32U, 37U, 12U, 13U, 1U }),
-framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
-width, data_type)
-{
-    const bool one_elem = (width == 1U);
-    bool status = validate_zero_padding(width, one_elem ? 1U : 17U, one_elem ? 1U : 7U, one_elem ? 1U : 2U, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1U, 6U), data_type);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
 // clang-format on
 // *INDENT-ON*
 
tests/validation/CL/ArithmeticAddition.cpp
index 6635c25fe7078c31e3075bb0e861529c63d4a183..c74f6a3b238f657c2ab2a86583e4f4befb860519 100644 (file)
@@ -124,21 +124,6 @@ TEST_CASE(FusedActivation, framework::DatasetMode::ALL)
     ARM_COMPUTE_EXPECT(bool(result) == false, framework::LogLevel::ERRORS);
 }
 
-TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL)
-{
-    CLTensor src1 = create_tensor<CLTensor>(TensorShape(3U, 3U), DataType::F32, 1, QuantizationInfo());
-    CLTensor src2 = create_tensor<CLTensor>(TensorShape(3U, 3U), DataType::F32, 1, QuantizationInfo());
-    CLTensor dst  = create_tensor<CLTensor>(TensorShape(3U, 3U), DataType::F32, 1, QuantizationInfo());
-
-    // Create and configure function
-    CLArithmeticAddition add;
-    add.configure(&src1, &src2, &dst, ConvertPolicy::WRAP);
-
-    validate(src1.info()->padding(), PaddingSize(0, 0, 0, 0));
-    validate(src2.info()->padding(), PaddingSize(0, 0, 0, 0));
-    validate(dst.info()->padding(), PaddingSize(0, 0, 0, 0));
-}
-
 template <typename T>
 using CLArithmeticAdditionFixture = ArithmeticAdditionValidationFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
 
tests/validation/CL/BatchConcatenateLayer.cpp
index e5de3a75c7fc51860ec8c6f568a8844bb5b474a6..522a6ab8ee36693765ef272f67687ff68d77d598 100644 (file)
@@ -39,37 +39,6 @@ namespace test
 {
 namespace validation
 {
-namespace
-{
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, unsigned int height, unsigned int channels, unsigned int batches, DataType data_type)
-{
-    TensorShape src_shape(width, height, channels, batches);
-    TensorShape dst_shape(width, height, channels, batches * 2);
-
-    // Create tensors
-    CLTensor src0 = create_tensor<CLTensor>(src_shape, data_type);
-    CLTensor src1 = create_tensor<CLTensor>(src_shape, data_type);
-    CLTensor dst  = create_tensor<CLTensor>(dst_shape, data_type);
-
-    src0.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-    src1.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-    dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
-    ARM_COMPUTE_EXPECT(src0.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    std::vector<const ICLTensor *> srcs = { &src0, &src1 };
-
-    // Create and configure function
-    CLConcatenateLayer concat;
-    concat.configure(srcs, &dst, 3U);
-
-    // Padding can be added along rhs and bias's X dimension
-    return src0.info()->padding().empty() && src1.info()->padding().empty() && dst.info()->padding().empty();
-}
-}
 TEST_SUITE(CL)
 TEST_SUITE(BatchConcatenateLayer)
 
@@ -112,26 +81,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
 }
 
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- *     - First dimension multiple of 16
- *     - First dimension non-multiple of 16
- *     - First dimension less than 16 (vec_size for qasymm8) but multiple
- *     - First dimension less than 16 (vec_size for qasymm8) non-multiple
- *     - Tensor with only one element
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width",    { 32U, 37U, 12U, 13U, 1U }),
-framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
-width, data_type)
-{
-    const bool one_elem = (width == 1U);
-    bool status = validate_zero_padding(width, one_elem ? 1U : 17U, one_elem ? 1U : 7U, one_elem ? 1U : 2U, data_type);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
 // clang-format on
 // *INDENT-ON*
 
tests/validation/CL/BatchNormalizationLayer.cpp
index 88f00b0eff7ff0c218ef1291d20f90232759c889..8b3bdbc3ead5fb576fa1617c8b280c9f1217d05e 100644 (file)
@@ -64,28 +64,6 @@ framework::dataset::make("UseBeta", { false, true })),
 framework::dataset::make("UseGamma", { false, true })),
 framework::dataset::make("Epsilon", { 0.001f }));
 
-bool validate_zero_padding(TensorShape shape0, const TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
-{
-    if(data_layout == DataLayout::NHWC)
-    {
-        permute(shape0, PermutationVector(2U, 0U, 1U));
-    }
-
-    // Create tensors
-    CLTensor src   = create_tensor<CLTensor>(shape0, dt, 1, QuantizationInfo(), data_layout);
-    CLTensor dst   = create_tensor<CLTensor>(shape0, dt, 1, QuantizationInfo(), data_layout);
-    CLTensor mean  = create_tensor<CLTensor>(shape1, dt, 1);
-    CLTensor var   = create_tensor<CLTensor>(shape1, dt, 1);
-    CLTensor beta  = create_tensor<CLTensor>(shape1, dt, 1);
-    CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1);
-
-    // Create and configure function
-    CLBatchNormalizationLayer norm;
-    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon, act_info);
-
-    return src.info()->padding().empty() && dst.info()->padding().empty() && mean.info()->padding().empty() && var.info()->padding().empty() && beta.info()->padding().empty()
-           && gamma.info()->padding().empty();
-}
 } // namespace
 
 TEST_SUITE(CL)
@@ -142,14 +120,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
 // clang-format on
 // *INDENT-ON*
 
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(), act_infos), framework::dataset::make("DataType", { DataType::F32, DataType::F16 })),
-                                                                         framework::dataset::make("DataLayout", { DataLayout::NHWC })),
-               shape0, shape1, episilon, act_infos, data_type, data_layout)
-{
-    bool status = validate_zero_padding(shape0, shape1, episilon, act_infos, data_type, data_layout);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallRandomBatchNormalizationLayerDataset(),
tests/validation/CL/GEMMLowp.cpp
index 00f831b2e213802544b5f1c158246c906f07a615..5a1971b54c72543ce5477573cbf3b5b694905f12 100644 (file)
@@ -47,25 +47,6 @@ namespace validation
 namespace
 {
 constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
-
-bool validate_output_stage_zero_padding(const TensorShape shape, const DataType dt, const GEMMLowpOutputStageType type)
-{
-    // Create tensors
-    CLTensor src  = create_tensor<CLTensor>(shape, DataType::S32, 1);
-    CLTensor bias = create_tensor<CLTensor>(TensorShape(shape.x()), DataType::S32, 1);
-    CLTensor dst  = create_tensor<CLTensor>(shape, dt, 1);
-
-    GEMMLowpOutputStageInfo info;
-    info.type             = type;
-    info.output_data_type = dt;
-    std::tie(info.gemmlowp_min_bound, info.gemmlowp_max_bound) = quantization::get_min_max_values_from_quantized_data_type(dt);
-
-    // Create and configure function
-    CLGEMMLowpOutputStage output_stage;
-    output_stage.configure(&src, &bias, &dst, info);
-
-    return src.info()->padding().empty() && bias.info()->padding().empty() && dst.info()->padding().empty();
-}
 }
 TEST_SUITE(CL)
 TEST_SUITE(GEMMLowp)
@@ -147,13 +128,6 @@ TEST_SUITE(OutputStage)
 
 TEST_SUITE(QuantizeDownInt32Scale)
 
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED })),
-               shape, data_type)
-{
-    bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 TEST_SUITE(QASYMM8)
 
 const auto quantize_down_int32_to_uint8_scale_cases = framework::dataset::make("result_offset", -2, 1) * framework::dataset::make("result_mult_int", 1, 2) * framework::dataset::make("result_shift", 2,
@@ -212,12 +186,6 @@ TEST_SUITE_END() // QASYMM8_SIGNED
 TEST_SUITE_END() // QuantizeDownInt32Scale
 
 TEST_SUITE(QuantizeDownInt32ScaleByFixedPoint)
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::QSYMM16 })),
-               shape, data_type)
-{
-    bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
 
 TEST_SUITE(QASYMM8)
 
@@ -353,13 +321,6 @@ TEST_SUITE_END() // QuantizeDownInt32ScaleByFixedPoint
 
 TEST_SUITE(QuantizeDownInt32ScaleByFloat)
 
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8, DataType::QASYMM8_SIGNED })),
-               shape, data_type)
-{
-    bool status = validate_output_stage_zero_padding(shape, data_type, GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 TEST_SUITE(QASYMM8)
 using CLGEMMLowpQuantizeDownInt32ScaleByFloatFixture =
     GEMMLowpQuantizeDownInt32ScaleByFloatValidationFixture<CLTensor, CLAccessor, CLGEMMLowpOutputStage, uint8_t>;
tests/validation/CL/GEMMMatrixMultiplyNative.cpp
index 1cf1209dee4fcc70cade4795492f0349891613af..ec6b87fbae86957ea8a286e954d4b5293e68c2a3 100644 (file)
@@ -186,55 +186,6 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
     CLGEMMMatrixMultiplyNative gemm;
     gemm.configure(&lhs, &rhs, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
 }
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, bool broadcast_bias, DataType data_type, const ActivationLayerInfo &act_info)
-{
-    const unsigned int M = m_value;
-    const unsigned int N = n_value;
-    const unsigned int K = k_value;
-
-    GEMMLHSMatrixInfo lhs_info;
-    lhs_info.m0         = m0_value;
-    lhs_info.k0         = k0_value;
-
-    GEMMRHSMatrixInfo rhs_info;
-    rhs_info.n0         = n0_value;
-    rhs_info.k0         = k0_value;
-
-    GEMMKernelInfo kernel_info;
-    kernel_info.m               = M;
-    kernel_info.n               = N;
-    kernel_info.k               = K;
-    kernel_info.broadcast_bias  = broadcast_bias;
-    kernel_info.activation_info = act_info;
-
-    const TensorShape lhs_shape(K, M, b_value);
-    const TensorShape rhs_shape(N, K, b_value);
-    const TensorShape bias_shape(N,
-                                 broadcast_bias? 1 : M,
-                                 broadcast_bias? 1 : b_value);
-    const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape, 1, data_type),
-                                                   TensorInfo(rhs_shape, 1, data_type),
-                                                   kernel_info);
-
-    // Create tensors
-    CLTensor lhs  = create_tensor<CLTensor>(lhs_shape, data_type);
-    CLTensor rhs  = create_tensor<CLTensor>(rhs_shape, data_type);
-    CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type);
-    CLTensor dst  = create_tensor<CLTensor>(dst_shape, data_type);
-
-    ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    // Create and configure function
-    CLGEMMMatrixMultiplyNative gemm;
-    gemm.configure(&lhs, &rhs, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
-
-    // Padding can be added along rhs and bias's X dimension
-    return dst.info()->padding().empty() && lhs.info()->padding().empty() && bias.info()->padding().bottom == 0 && bias.info()->padding().top == 0;
-}
 } // namespace
 
 TEST_SUITE(CL)
@@ -256,29 +207,6 @@ m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias
     validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, broadcast_bias, DataType::F32, act_value);
 }
 
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- *     - No partial blocks in both x and y dimensions
- *     - Partial blocks in x dimension
- *     - Partial blocks in y dimension
- *     - Partial blocks in both x and y dimensions
- *     - No blocks in both x and y dimensions, scalar store (N0==1)
- *     - Special case: partial_n0 == 5 (vstore1 should be invoked instead of vstore_partial_1)
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(
-framework::dataset::make("M",                   { 24, 64, 101,   1, 50, 256, }),
-framework::dataset::make("N",                   { 48, 29,  16, 122, 20,  21, })),
-framework::dataset::make("M0",                  { 4,   8,   7,   2,  1,   8, })),
-framework::dataset::make("N0",                  { 4,   4,  16,   3,  1,   8, })),
-m_value, n_value, m0_value, n0_value)
-{
-    bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, false, DataType::F32, ActivationLayerInfo());
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmallBoundaryHandlingPartialInXPartialInY, CLGEMMMatrixMultiplyNativeFixture<float>, framework::DatasetMode::ALL,
                 combine(combine(
                         framework::dataset::make("M", 3),
tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
index 0a0a1fc397a5e9842888b4bf05f3d6de8e7223c8..95979b3131b6ba9071f2db1f44f7c6118e0c045b 100644 (file)
@@ -171,100 +171,11 @@ const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {
 /** LHS transposed values */
 const auto lhs_transpose_values = framework::dataset::make("lhs_transpose", { false, true } );
 
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value,
-                            unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int h0_value,
-                            bool i_value_rhs, bool t_value_rhs, bool export_to_cl_image, bool broadcast_bias, unsigned int depth_output_gemm3d, const ActivationLayerInfo &act_info,
-                            DataType dt_input0, DataType dt_input1, DataType dt_input2, DataType dt_output, float alpha, float beta)
-{
-    const unsigned int M = m_value;
-    const unsigned int N = n_value;
-    const unsigned int K = k_value;
-
-    GEMMLHSMatrixInfo lhs_info;
-    lhs_info.m0         = m0_value;
-    lhs_info.k0         = k0_value;
-
-    GEMMRHSMatrixInfo rhs_info;
-    rhs_info.n0         = n0_value;
-    rhs_info.k0         = k0_value;
-    rhs_info.h0         = h0_value;
-    rhs_info.interleave = i_value_rhs;
-    rhs_info.transpose  = t_value_rhs;
-    rhs_info.export_to_cl_image = export_to_cl_image;
-
-    GEMMKernelInfo kernel_info;
-    kernel_info.m                       = M;
-    kernel_info.n                       = N;
-    kernel_info.k                       = K;
-    kernel_info.depth_output_gemm3d     = depth_output_gemm3d;
-    kernel_info.reinterpret_input_as_3d = false;
-    kernel_info.broadcast_bias          = broadcast_bias;
-    kernel_info.activation_info         = act_info;
-
-    const TensorShape lhs_shape(K, M, b_value);
-    const TensorShape rhs_shape(N, K, b_value);
-    const TensorShape lhs_shape_reshaped = compute_lhs_reshaped_shape(TensorInfo(lhs_shape, 1, dt_input0),
-                                                                      lhs_info);
-    const TensorShape rhs_shape_reshaped = compute_rhs_reshaped_shape(TensorInfo(rhs_shape, 1, dt_input1),
-                                                                      rhs_info);
-
-    const TensorShape dst_shape = compute_mm_shape(TensorInfo(lhs_shape_reshaped, 1, dt_input0),
-                                                   TensorInfo(rhs_shape_reshaped, 1, dt_input1),
-                                                   kernel_info);
-
-    const TensorShape bias_shape(N,
-                                 M, // Correct calculation should be: broadcast_bias? 1 : M, it's wrong here on purpose just for validation test
-                                 broadcast_bias? 1 : b_value);
-
-    // Create tensors
-    CLTensor lhs_reshaped  = create_tensor<CLTensor>(lhs_shape_reshaped, dt_input0);
-    CLTensor rhs_reshaped  = create_tensor<CLTensor>(rhs_shape_reshaped, dt_input1);
-    CLTensor bias = create_tensor<CLTensor>(bias_shape, dt_input2);
-    CLTensor dst  = create_tensor<CLTensor>(dst_shape, dt_output);
-
-    ARM_COMPUTE_EXPECT(lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    // Validate zero-padding
-    CLGEMMMatrixMultiplyReshaped gemm;
-
-    gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
-
-    // Padding can be added along rhs and bias's X/Y dimension
-    return dst.info()->padding().empty() && lhs_reshaped.info()->padding().empty();
-}
 } // namespace
 
 TEST_SUITE(CL)
 TEST_SUITE(GEMMMatrixMultiplyReshaped)
 
-/** Validate zero padding tests
- *
- * A series of validation tests to check the zero padding requirement
- *
- * Checks performed in order:
- *     - No partial blocks in both x and y dimensions
- *     - Partial blocks in x dimension
- *     - Partial blocks in y dimension
- *     - Partial blocks in both x and y dimensions
- *     - Special case: partial_n0 == 9 (vstore1 should be invoked instead of vstore_partial_1)
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(
-framework::dataset::make("M",                   { 24, 64, 101,   1, 103 }),
-framework::dataset::make("N",                   { 48, 29,  16, 121,  41 })),
-framework::dataset::make("M0",                  {  4,  8,   4,   2,   4 })),
-framework::dataset::make("N0",                  {  4,  4,  16,   2,  16 })),
-m_value, n_value, m0_value, n0_value)
-{
-    constexpr DataType dt = DataType::F32;
-
-    bool status = validate_zero_padding(m_value, n_value, 23, 1, m0_value, n0_value, 4, 1, false, false, false, 0, 0, ActivationLayerInfo(), dt, dt, dt, dt, 1.0f, 1.0f);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
tests/validation/CL/GEMMReshapeLHSMatrix.cpp
index 4af495944e9d473c46f508240b9befc8f27e29bf..34c37dffdea742f7e9a79d9b546b1ec0112b4960 100644 (file)
@@ -82,68 +82,11 @@ const auto i_values = framework::dataset::make("interleave", { true, false });
 /** Transpose values to test */
 const auto t_values = framework::dataset::make("transpose", { true, false });
 
-/** Zero padding test */
-bool validate_zero_padding(unsigned int m_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int k0_value, unsigned int v0_value,
-                            bool i_value_lhs, bool t_value_lhs, bool input_as_3d, DataType dt)
-{
-    const unsigned int M = m_value;
-    const unsigned int K = k_value;
-    const unsigned int B = b_value;
-
-    GEMMLHSMatrixInfo lhs_info;
-    lhs_info.m0 = m0_value;
-    lhs_info.k0 = k0_value;
-    lhs_info.v0 = v0_value;
-    lhs_info.interleave = i_value_lhs;
-    lhs_info.transpose = t_value_lhs;
-
-    const TensorShape lhs_shape(K, M, B);
-    const TensorShape lhs_shape_reshaped = compute_lhs_reshaped_shape(TensorInfo(lhs_shape, 1, dt), lhs_info, input_as_3d);
-
-    // Create tensors
-    CLTensor lhs = create_tensor<CLTensor>(lhs_shape, dt);
-    CLTensor dst = create_tensor<CLTensor>(lhs_shape_reshaped, dt);
-
-    ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    // Validate zero-padding
-    CLGEMMReshapeLHSMatrixKernel lhs_reshape;
-
-    lhs_reshape.configure(&lhs, &dst, lhs_info, input_as_3d);
-
-    return lhs.info()->padding().empty();
-}
 } // namespace
 
 TEST_SUITE(CL)
 TEST_SUITE(GEMMReshapeLHSMatrix)
 
-/** Validate zero padding tests for the LHS input tensor
- *
- * A series of validation tests to test the zero padding requirement
- *
- * Checks performed in order:
- *     - Case where M and K are smaller than M0 and K0
- *     - Generic test case with batch size = 1
- *     - Generic test case with batch size = 4
- *     - Generic test case with input_as_3d_value = true
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
-framework::dataset::make("M",                   { 1, 23, 63, 101 }),
-framework::dataset::make("K",                   { 1, 47, 29,  27 })),
-framework::dataset::make("B",                   { 1, 1, 4, 7 })),
-framework::dataset::make("M0",                  { 4, 2, 4, 8 })),
-framework::dataset::make("K0",                  { 2, 2, 4, 8 })),
-framework::dataset::make("input_as_3d",         { false, false, false, true })),
-m_value, k_value, b_value, m0_value, k0_value, input_as_3d_value)
-{
-    constexpr DataType dt = DataType::F32;
-
-    bool status = validate_zero_padding(m_value, k_value, b_value, m0_value, k0_value, 2, false, false, input_as_3d_value, dt);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 FIXTURE_DATA_TEST_CASE(S32, CLGEMMReshapeLHSMatrixFixture<int>, framework::DatasetMode::ALL,
                 combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
                                                                    b_values),
tests/validation/CL/Im2Col.cpp
index e7e46b7bc5f7dc0c0321de3b3a98f18278f2b1bd..a31aec4d0cecbd072d3e4bbf1b61a3ef454af5bc 100644 (file)
@@ -138,45 +138,6 @@ using CLIm2ColFixture = Im2ColValidationFixture<CLTensor, CLAccessor, CLIm2Col,
 
 TEST_SUITE(NHWC)
 
-/** Test that there's no padding added to input or output as part of configure
- *
- * @note 2 elements processed per iteration
- *
- * Three tests will be run:
- *  - Channels are multiple of elements processed
- *  - Channels larger and non multiple of elements used
- *  - Channels smaller and not multiple of elements used
- *
- */
-DATA_TEST_CASE(ValidateZeroPaddingNumElemsPerIterEqual2, framework::DatasetMode::ALL,
-               combine(combine(combine(combine(combine(
-                                                   framework::dataset::make("InputChannel",
-{
-    2, 9, 1,
-}),
-framework::dataset::make("DataType", { DataType::F32 })),
-framework::dataset::make("Kernel", { Size2D(3, 4) })),
-framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 1, 2) })),
-framework::dataset::make("QInfo", { QuantizationInfo() })),
-framework::dataset::make("DataLayout", { DataLayout::NHWC })),
-input_channel, data_type, conv_size, pad_stride_info, qinfo, data_layout)
-{
-    TensorShape input_shape(input_channel, 10U, 30U, 3U);
-    const bool  has_bias = false;
-
-    const auto input_info   = TensorInfo(input_shape, 1, data_type, data_layout);
-    const auto output_shape = compute_im2col_conv_shape(&input_info, conv_size, pad_stride_info, has_bias, Size2D(1U, 1U), true);
-
-    CLTensor input  = create_tensor<CLTensor>(input_shape, data_type, 1, qinfo, data_layout);
-    CLTensor output = create_tensor<CLTensor>(output_shape, data_type, 1, qinfo, data_layout);
-
-    CLIm2ColKernel im2col;
-    im2col.configure(&input, &output, conv_size, pad_stride_info, has_bias);
-
-    // Ensure there're no paddings added at all
-    const bool no_padding = input.info()->padding().empty() && output.info()->padding().empty();
-    ARM_COMPUTE_EXPECT(no_padding, framework::LogLevel::ERRORS);
-}
 /** Test special kernel used for NHWC for 3x3 kernels
  *
  * @note 2 elements processed per iteration
tests/validation/CL/PoolingLayer.cpp
index 071b58323cda701e062262a0cdb2ed6fd8b02bad..c79775e1e2e985bd0498b4c26b1c02a69ce7c07f 100644 (file)
@@ -85,39 +85,6 @@ const auto                           pool_data_layout_dataset = framework::datas
 
 const auto pool_fp_mixed_precision_dataset = framework::dataset::make("FpMixedPrecision", { true, false });
 
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, DataType data_type)
-{
-    const PoolingLayerInfo pool_info(PoolingType::MAX, Size2D(2U, 2U), DataLayout::NHWC);
-
-    TensorShape shape(width, 23, 11, 1);
-
-    // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, data_type);
-    CLTensor idx;
-    CLTensor dst;
-
-    src.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-    dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
-    CLPoolingLayer pool;
-
-    if(is_data_type_quantized(data_type))
-    {
-        pool.configure(&src, &dst, pool_info, nullptr);
-
-        // Padding can be added along rhs and bias's X dimension
-        return src.info()->padding().empty() && dst.info()->padding().empty();
-    }
-    else
-    {
-        pool.configure(&src, &dst, pool_info, &idx);
-
-        // Padding can be added along rhs and bias's X dimension
-        return src.info()->padding().empty() && dst.info()->padding().empty() && idx.info()->padding().empty();
-    }
-}
-
 } // namespace
 
 TEST_SUITE(CL)
@@ -159,26 +126,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS);
 }
 
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added as part of configuration for 4 different scenarios.
- *
- * Checks performed in order:
- *     - First dimension multiple of 16
- *     - First dimension non-multiple of 16
- *     - First dimension less than 16 (vec_size for qasymm8) but multiple
- *     - First dimension less than 16 (vec_size for qasymm8) non-multiple
- *     - Tensor with only one element
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width",    { 32U, 37U, 12U, 13U, 1U }),
-framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 })),
-width, data_type)
-{
-    bool status = validate_zero_padding(width, data_type);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 // clang-format on
 // *INDENT-ON*
 
tests/validation/CL/Winograd.cpp
index f206e92493b669b759a69a6e7e463155dfd86288..750799ace2724dc51f1a9e19fc66337241f9a7e0 100644 (file)
@@ -183,28 +183,6 @@ const auto ActivationFunctionsSmallDataset = framework::dataset::make("Activatio
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SOFT_RELU)
 });
 
-/** Zero padding test */
-bool validate_zero_padding(unsigned int width, unsigned height)
-{
-    TensorShape shape(width, height, 11, 1);
-
-    WinogradInfo winograd_info = WinogradInfo(Size2D(4U, 4U), Size2D(5U, 5U), Size2D(width, height), PadStrideInfo(), DataLayout::NHWC);
-
-    // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, DataType::F32, 1, QuantizationInfo(), DataLayout::NHWC);
-    CLTensor dst;
-
-    src.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-    dst.info()->set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
-
-    CLWinogradInputTransform input_transform;
-
-    input_transform.configure(&src, &dst, winograd_info);
-
-        // Padding can be added along rhs and bias's X dimension
-    return src.info()->padding().empty() && dst.info()->padding().empty();
-}
-
 } // namespace
 
 using namespace arm_compute::misc::shape_calculator;
@@ -214,19 +192,6 @@ TEST_SUITE(Winograd)
 
 TEST_SUITE(InputTransform)
 
-/** Validate zero padding tests
- *
- * A series of validation tests to check that no padding is added
- */
-DATA_TEST_CASE(ValidateZeroPadding, framework::DatasetMode::ALL, zip(
-framework::dataset::make("Width",  { 32U, 37U, 12U, 1U }),
-framework::dataset::make("Height", { 13U, 27U, 19U, 1U })),
-width, height)
-{
-    bool status = validate_zero_padding(width, height);
-    ARM_COMPUTE_EXPECT(status, framework::LogLevel::ERRORS);
-}
-
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                 framework::dataset::make("InputInfo",{
                                                                                         TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F16),     // F16 not supported