[conv] support dilation property
author hyeonseok lee <hs89.lee@samsung.com>
Tue, 28 Jun 2022 05:21:44 +0000 (14:21 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Mon, 11 Jul 2022 01:13:34 +0000 (10:13 +0900)
 - Support dilation property in conv1d/conv2d layers
 - Add unittests with dilation

Close #1922

Signed-off-by: hyeonseok lee <hs89.lee@samsung.com>
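
For reference, the shape arithmetic this patch introduces: a kernel of size k with dilation d spans an effective extent of (k - 1) * d + 1 input cells, and the output length along each axis follows from that extent. A minimal standalone C++ sketch (not part of the patch) of the formula used in Conv2DLayer::finalize:

    #include <iostream>

    // Effective kernel extent under dilation: (k - 1) * d + 1.
    unsigned int effectiveKernel(unsigned int kernel, unsigned int dilation) {
      return (kernel - 1) * dilation + 1;
    }

    // 'valid' output length along one axis, mirroring Conv2DLayer::finalize.
    unsigned int outLen(unsigned int input, unsigned int kernel,
                        unsigned int stride, unsigned int dilation) {
      return (input - effectiveKernel(kernel, dilation)) / stride + 1;
    }

    int main() {
      // kernel 3, dilation 2 -> effective extent 5; (11 - 5) / 1 + 1 = 7
      std::cout << outLen(11, 3, 1, 2) << '\n'; // prints 7
    }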
12 files changed:
nntrainer/layers/common_properties.cpp
nntrainer/layers/common_properties.h
nntrainer/layers/conv1d_layer.cpp
nntrainer/layers/conv1d_layer.h
nntrainer/layers/conv2d_layer.cpp
nntrainer/layers/conv2d_layer.h
nntrainer/layers/pooling2d_layer.cpp
packaging/unittest_layers_v2.tar.gz
test/input_gen/genLayerTests.py
test/unittest/layers/unittest_layers_convolution1d.cpp
test/unittest/layers/unittest_layers_convolution2d.cpp
test/unittest/unittest_common_properties.cpp

index 96e17e5..f662e85 100644 (file)
@@ -105,6 +105,8 @@ PoolSize::PoolSize(unsigned int value) { set(value); }
 
 Stride::Stride(unsigned int value) { set(value); }
 
+Dilation::Dilation(unsigned int value) { set(value); }
+
 /**
  * @brief unsigned integer property, internally used to parse padding values
  *
@@ -141,7 +143,8 @@ bool Padding2D::isValid(const std::string &v) const {
 
 std::array<unsigned int, 4>
 Padding2D::compute(const TensorDim &input, const TensorDim &kernel,
-                   const std::array<unsigned int, 2> &strides) {
+                   const std::array<unsigned int, 2> &strides,
+                   const std::array<unsigned int, 2> &dilation) {
   auto &padding_repr = get(); /// padding representation
 
   if (istrequal(padding_repr, "valid")) {
@@ -152,21 +155,19 @@ Padding2D::compute(const TensorDim &input, const TensorDim &kernel,
   /// possible. otherwise pad_all_side / 2 is allocated to top | left and rest
   /// are assigned to the other side
   if (istrequal(padding_repr, "same")) {
-    /// @note if we start to consider dilation, this calculation has to tuned
-    /// accordingly.
-
     auto calculate_padding = [](unsigned input_, unsigned kernel_,
-                                unsigned stride) {
+                                unsigned stride, unsigned dilation) {
       /// ceil(input / stride)
+      unsigned int eff_kernel = (kernel_ - 1) * dilation + 1;
       auto out = (input_ + stride - 1) / stride;
-      auto req_input = (out - 1) * stride + kernel_;
+      auto req_input = (out - 1) * stride + eff_kernel;
       return req_input >= input_ ? req_input - input_ : 0;
     };
 
+    auto pad_vertical = calculate_padding(input.height(), kernel.height(),
+                                          strides[0], dilation[0]);
     auto pad_horizontal =
-      calculate_padding(input.width(), kernel.width(), strides[1]);
-    auto pad_vertical =
-      calculate_padding(input.height(), kernel.height(), strides[0]);
+      calculate_padding(input.width(), kernel.width(), strides[1], dilation[1]);
 
     auto pad_top = pad_vertical / 2;
     auto pad_left = pad_horizontal / 2;
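
A worked check of the tuned lambda above (a standalone sketch, not the patch code), using the 11-wide inputs from the new unit tests:

    // Mirrors the calculate_padding lambda above.
    unsigned int samePadding(unsigned int input, unsigned int kernel,
                             unsigned int stride, unsigned int dilation) {
      unsigned int eff_kernel = (kernel - 1) * dilation + 1;
      unsigned int out = (input + stride - 1) / stride; // ceil(input / stride)
      unsigned int req_input = (out - 1) * stride + eff_kernel;
      return req_input >= input ? req_input - input : 0;
    }
    // samePadding(11, 3, 1, 2) == 4: eff_kernel = 5, out = 11, req_input = 15,
    // so 4 pad cells in total, split 2/2 between top|left and bottom|right.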
index 4312b5b..9f87180 100644 (file)
@@ -359,6 +359,22 @@ public:
 };
 
 /**
+ * @brief Dilation property, dilation indicates how much space will be inserted
+ * between kernel elements
+ *
+ */
+class Dilation : public nntrainer::PositiveIntegerProperty {
+public:
+  /**
+   * @brief Construct a new Dilation object with a default value of 1
+   *
+   */
+  Dilation(unsigned int value = 1);
+  static constexpr const char *key = "dilation"; /**< unique key to access */
+  using prop_tag = uint_prop_tag;                /**< property type */
+};
+
+/**
  * @brief Padding2D property, this is used to calculate padding2D
  * @details Padding2D is saved as a string. Upon calling Padding2D::compute,
  * returns std::vector<unsigned int> which has computed padding2Ds, below
@@ -393,7 +409,8 @@ public:
    */
   std::array<unsigned int, 4>
   compute(const TensorDim &input, const TensorDim &kernel,
-          const std::array<unsigned int, 2> &strides);
+          const std::array<unsigned int, 2> &strides,
+          const std::array<unsigned int, 2> &dilation);
 };
 
 /**
index cfe89ec..dc41bc0 100644 (file)
@@ -32,7 +32,7 @@ Conv1DLayer::Conv1DLayer(const std::array<unsigned int, 2> &padding_) :
   LayerImpl(),
   padding(padding_),
   conv_props(props::FilterSize(), props::KernelSize(), props::Stride(),
-             props::Padding2D()) {
+             props::Padding2D(), props::Dilation()) {
   wt_idx.fill(std::numeric_limits<unsigned>::max());
   conv2d_layer = std::make_unique<Conv2DLayer>();
 }
@@ -65,6 +65,9 @@ void Conv1DLayer::finalize(InitLayerContext &context) {
                   std::to_string(std::get<props::Stride>(conv_props).get()));
   setPropertyKV(props::Padding2D::key,
                 std::get<props::Padding2D>(conv_props).get());
+  setPropertyKV(props::Dilation::key,
+                "1," +
+                  std::to_string(std::get<props::Dilation>(conv_props).get()));
 
   conv2d_layer->finalize(context);
 }
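
Conv1D is implemented on top of Conv2D with a height of 1, so the scalar dilation is forwarded as the pair "1,<d>" above. From the API side the property is set like any other conv property; a hypothetical usage sketch (the property strings mirror the unit tests below):

    // Hypothetical usage sketch; setProperty takes "key=value" strings.
    auto conv = std::make_unique<nntrainer::Conv1DLayer>();
    conv->setProperty({"filters=2", "kernel_size=3", "dilation=2"});
    // finalize() then forwards dilation to the wrapped Conv2DLayer as "1,2".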
index a328da3..eadc903 100644 (file)
@@ -101,7 +101,7 @@ public:
 private:
   std::array<unsigned int, 2> padding;
   std::tuple<props::FilterSize, props::KernelSize, props::Stride,
-             props::Padding2D>
+             props::Padding2D, props::Dilation>
     conv_props;
 
   std::array<unsigned int, 5> wt_idx; /**< indices of the weights and tensors */
index fec2d6a..6c685d1 100644 (file)
@@ -52,7 +52,7 @@ static TensorDim calcCol2ImOutputDim(const TensorDim &out,
 static void col2im(const Tensor &col_matrix, const TensorDim &kdim,
                    const std::array<unsigned, 4> &padding,
                    const std::array<props::Stride, CONV2D_DIM> &mstride,
-                   const std::array<unsigned, CONV2D_DIM> &dilation,
+                   const std::array<props::Dilation, CONV2D_DIM> &dilation,
                    Tensor &image) {
   auto [pt, pb, pl, pr] = padding;
 
@@ -129,7 +129,7 @@ static void col2im(const Tensor &col_matrix, const TensorDim &kdim,
 static void im2col(const Tensor &in, const TensorDim &kdim,
                    const std::array<unsigned int, 4> &padding,
                    const std::array<props::Stride, CONV2D_DIM> &mstride,
-                   const std::array<unsigned int, CONV2D_DIM> &dilation,
+                   const std::array<props::Dilation, CONV2D_DIM> &dilation,
                    Tensor &out) {
   /// for channel last mode, this is deprecated for now, leaving here on
   /// purpose.
@@ -248,7 +248,8 @@ Conv2DLayer::Conv2DLayer(
   LayerImpl(),
   padding(padding_),
   conv_props(props::FilterSize(), std::array<props::KernelSize, CONV2D_DIM>(),
-             std::array<props::Stride, CONV2D_DIM>(), props::Padding2D()) {
+             std::array<props::Stride, CONV2D_DIM>(), props::Padding2D(),
+             std::array<props::Dilation, CONV2D_DIM>()) {
   wt_idx.fill(std::numeric_limits<unsigned>::max());
 }
 
@@ -274,17 +275,20 @@ void Conv2DLayer::finalize(InitLayerContext &context) {
   auto &kernel_size =
     std::get<std::array<props::KernelSize, CONV2D_DIM>>(conv_props);
   auto &stride = std::get<std::array<props::Stride, CONV2D_DIM>>(conv_props);
+  auto &dilation =
+    std::get<std::array<props::Dilation, CONV2D_DIM>>(conv_props);
 
-  TensorDim dim =
+  TensorDim kernel_dim =
     TensorDim(filter_size, in_dim.channel(), kernel_size[0], kernel_size[1]);
   TensorDim bias_dim = TensorDim(1, filter_size, 1, 1);
 
   padding = std::get<props::Padding2D>(conv_props)
-              .compute(in_dim, dim, {stride[0], stride[1]});
+              .compute(in_dim, kernel_dim, {stride[0], stride[1]},
+                       {dilation[0], dilation[1]});
 
   wt_idx[ConvParams::weight] = context.requestWeight(
-    dim, weight_initializer, weight_regularizer, weight_regularizer_constant,
-    weight_decay, "filter", true);
+    kernel_dim, weight_initializer, weight_regularizer,
+    weight_regularizer_constant, weight_decay, "filter", true);
 
   if (disable_bias.empty() || disable_bias.get() == false) {
     wt_idx[ConvParams::bias] =
@@ -296,11 +300,14 @@ void Conv2DLayer::finalize(InitLayerContext &context) {
   unsigned int eff_in_height = in_dim.height() + padding[0] + padding[1];
   unsigned int eff_in_width = in_dim.width() + padding[2] + padding[3];
 
+  unsigned int eff_k_height = (kernel_size[0] - 1) * dilation[0] + 1;
+  unsigned int eff_k_width = (kernel_size[1] - 1) * dilation[1] + 1;
+
   TensorDim out_dim;
   out_dim.batch(in_dim.batch());
   out_dim.channel(filter_size);
-  out_dim.height((eff_in_height - kernel_size[0]) / stride[0] + 1);
-  out_dim.width((eff_in_width - kernel_size[1]) / stride[1] + 1);
+  out_dim.height((eff_in_height - eff_k_height) / stride[0] + 1);
+  out_dim.width((eff_in_width - eff_k_width) / stride[1] + 1);
   context.setOutputDimensions({out_dim});
 
   if (eff_in_height < kernel_size[0] || eff_in_width < kernel_size[1]) {
@@ -328,7 +335,7 @@ void Conv2DLayer::finalize(InitLayerContext &context) {
    * which will be expensive.
    */
   wt_idx[ConvParams::inter_result] = context.requestTensor(
-    calcCol2ImOutputDim(out_dim, dim), "inter_result",
+    calcCol2ImOutputDim(out_dim, kernel_dim), "inter_result",
     Tensor::Initializer::NONE, false, TensorLifespan::ITERATION_LIFESPAN);
 }
 
@@ -337,6 +344,8 @@ void Conv2DLayer::forwarding(RunLayerContext &context, bool training) {
 
   unsigned int filter_size = std::get<props::FilterSize>(conv_props);
   auto &stride = std::get<std::array<props::Stride, CONV2D_DIM>>(conv_props);
+  auto &dilation =
+    std::get<std::array<props::Dilation, CONV2D_DIM>>(conv_props);
 
   Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
   Tensor &hidden_ = context.getOutput(SINGLE_INOUT_IDX);
@@ -406,7 +415,7 @@ void Conv2DLayer::forwarding(RunLayerContext &context, bool training) {
 
     Tensor in_sub = input_.getBatchSlice(b, 1);
 
-    im2col(in_sub, filter_dim, padding, stride, {1, 1}, im2col_result);
+    im2col(in_sub, filter_dim, padding, stride, dilation, im2col_result);
     filter_kernel.dot(im2col_result, out, false, true);
   }
 
@@ -424,6 +433,8 @@ void Conv2DLayer::forwarding(RunLayerContext &context, bool training) {
 void Conv2DLayer::calcDerivative(RunLayerContext &context) {
   unsigned int filter_size = std::get<props::FilterSize>(conv_props);
   auto &stride = std::get<std::array<props::Stride, CONV2D_DIM>>(conv_props);
+  auto &dilation =
+    std::get<std::array<props::Dilation, CONV2D_DIM>>(conv_props);
 
   const Tensor &derivative = context.getIncomingDerivative(SINGLE_INOUT_IDX);
   Tensor &input_derivative = context.getOutgoingDerivative(SINGLE_INOUT_IDX);
@@ -447,7 +458,7 @@ void Conv2DLayer::calcDerivative(RunLayerContext &context) {
     deriv_sub.reshape({filter_size, derivative.width() * derivative.height()});
 
     filter_kernel.dot(deriv_sub, col2im_result, true, false);
-    col2im(col2im_result, filter_dim, padding, stride, {1, 1}, in_deriv_sub);
+    col2im(col2im_result, filter_dim, padding, stride, dilation, in_deriv_sub);
   }
 
   filter_kernel.reshape(filter_dim);
@@ -456,6 +467,8 @@ void Conv2DLayer::calcDerivative(RunLayerContext &context) {
 void Conv2DLayer::calcGradient(RunLayerContext &context) {
   unsigned int filter_size = std::get<props::FilterSize>(conv_props);
   auto &stride = std::get<std::array<props::Stride, CONV2D_DIM>>(conv_props);
+  auto &dilation =
+    std::get<std::array<props::Dilation, CONV2D_DIM>>(conv_props);
 
   const Tensor &derivative = context.getIncomingDerivative(SINGLE_INOUT_IDX);
   Tensor &input_ = context.getInput(SINGLE_INOUT_IDX);
@@ -489,7 +502,7 @@ void Conv2DLayer::calcGradient(RunLayerContext &context) {
      * expense of memory. In this case, memory of im2col_result must be saved
      * for the whole batch. try this while benchmarking.
      */
-    im2col(in_sub, filter_dim, padding, stride, {1, 1}, im2col_result);
+    im2col(in_sub, filter_dim, padding, stride, dilation, im2col_result);
     deriv_sub.dot(im2col_result, delK, false, false, b == 0 ? 0 : 1);
   }
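
The im2col/col2im helpers in this file now receive the dilation array: along each axis, kernel tap k reads the input at offset k * dilation instead of k. A one-dimensional sketch of that sampling pattern (an illustration, not the patch's helper):

    #include <vector>

    // 1-D dilated im2col sketch: for every output position, gather the kernel
    // taps spaced 'dilation' apart, as the 2-D helpers do per axis.
    std::vector<std::vector<float>> im2col_1d(const std::vector<float> &in,
                                              unsigned int kernel,
                                              unsigned int stride,
                                              unsigned int dilation) {
      unsigned int eff_kernel = (kernel - 1) * dilation + 1;
      std::vector<std::vector<float>> cols;
      for (unsigned int s = 0; s + eff_kernel <= in.size(); s += stride) {
        std::vector<float> col(kernel);
        for (unsigned int k = 0; k < kernel; ++k)
          col[k] = in[s + k * dilation]; // tap k skips (dilation - 1) cells
        cols.push_back(col);
      }
      return cols;
    }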
 
index 5633ea8..0c59afb 100644 (file)
@@ -111,7 +111,8 @@ public:
 private:
   std::array<unsigned int, CONV2D_DIM * 2> padding;
   std::tuple<props::FilterSize, std::array<props::KernelSize, CONV2D_DIM>,
-             std::array<props::Stride, CONV2D_DIM>, props::Padding2D>
+             std::array<props::Stride, CONV2D_DIM>, props::Padding2D,
+             std::array<props::Dilation, CONV2D_DIM>>
     conv_props;
 
   std::array<unsigned int, 5> wt_idx; /**< indices of the weights and tensors */
index 8cbffe9..8ecc3b0 100644 (file)
@@ -63,9 +63,9 @@ void Pooling2DLayer::finalize(InitLayerContext &context) {
     pool_size.emplace_back(props::PoolSize(in_dim.width()));
   }
 
-  padding =
-    std::get<props::Padding2D>(pooling2d_props)
-      .compute(in_dim, {pool_size[0], pool_size[1]}, {stride[0], stride[1]});
+  padding = std::get<props::Padding2D>(pooling2d_props)
+              .compute(in_dim, {pool_size[0], pool_size[1]},
+                       {stride[0], stride[1]}, {1, 1});
 
   auto [pt, pb, pl, pr] = padding;
 
index aa64225..1deccfc 100644 (file)
Binary files a/packaging/unittest_layers_v2.tar.gz and b/packaging/unittest_layers_v2.tar.gz differ
index 5401bb0..1a697dd 100644 (file)
@@ -79,6 +79,14 @@ if __name__ == "__main__":
     record_single(conv, (1, 2, 5, 5), "conv2d_sb_1x1_kernel")
     record_single(conv, (3, 2, 5, 5), "conv2d_mb_1x1_kernel")
 
+    conv = K.layers.Conv2D(2, 3, dilation_rate=(2, 2))
+    record_single(conv, (1, 3, 11, 11), "conv2d_sb_dilation")
+    record_single(conv, (3, 3, 11, 11), "conv2d_mb_dilation")
+
+    conv = K.layers.Conv2D(2, 3, padding="same", dilation_rate=(2, 2))
+    record_single(conv, (1, 3, 11, 11), "conv2d_sb_same_dilation")
+    record_single(conv, (3, 3, 11, 11), "conv2d_mb_same_dilation")
+
     # use float data to generate input here
     attention = K.layers.Attention()
     record_single(attention, [(1, 5, 7), (1, 3, 7)],
@@ -238,6 +246,14 @@ if __name__ == "__main__":
     record_single(conv, (1, 2, 1, 5), "conv1d_sb_1x1_kernel")
     record_single(conv, (3, 2, 1, 5), "conv1d_mb_1x1_kernel")
 
+    conv = K.layers.Conv1D(2, 3, dilation_rate=2)
+    record_single(conv, (1, 3, 1, 11), "conv1d_sb_dilation")
+    record_single(conv, (3, 3, 1, 11), "conv1d_mb_dilation")
+
+    conv = K.layers.Conv1D(2, 3, padding="same", dilation_rate=2)
+    record_single(conv, (1, 3, 1, 11), "conv1d_sb_same_dilation")
+    record_single(conv, (3, 3, 1, 11), "conv1d_mb_same_dilation")
+
     concat = K.layers.Concatenate(axis=3)
     record_single(concat, [(2,3,3,2), (2, 3, 3, 3)], "concat_dim3")
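
The new golden cases all use length-11 axes with a kernel of 3 and dilation of 2, so the recorded shapes can be sanity-checked by hand (a worked check, assuming stride 1):

    eff_kernel = (3 - 1) * 2 + 1 = 5
    valid: (11 - 5) / 1 + 1 = 7            e.g. conv2d_sb_dilation      -> 1:2:7:7
    same : pad = 4 per axis, so (11 + 4 - 5) / 1 + 1 = 11
                                           e.g. conv2d_sb_same_dilation -> 1:2:11:11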
 
index db59e63..4982e19 100644 (file)
@@ -139,13 +139,55 @@ auto conv1d_mb_1x1_kernel =
                            "3:2:1:5", "conv1d_mb_1x1_kernel.nnlayergolden",
                            LayerGoldenTestParamOptions::DEFAULT);
 
+auto conv1d_sb_dilation =
+  LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+                           {
+                             "filters=2",
+                             "kernel_size=3",
+                             "dilation=2",
+                           },
+                           "1:3:1:11", "conv1d_sb_dilation.nnlayergolden",
+                           LayerGoldenTestParamOptions::DEFAULT);
+
+auto conv1d_mb_dilation =
+  LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+                           {
+                             "filters=2",
+                             "kernel_size=3",
+                             "dilation=2",
+                           },
+                           "3:3:1:11", "conv1d_mb_dilation.nnlayergolden",
+                           LayerGoldenTestParamOptions::DEFAULT);
+
+auto conv1d_sb_same_dilation =
+  LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+                           {
+                             "filters=2",
+                             "kernel_size=3",
+                             "padding=same",
+                             "dilation=2",
+                           },
+                           "1:3:1:11", "conv1d_sb_same_dilation.nnlayergolden",
+                           LayerGoldenTestParamOptions::DEFAULT);
+
+auto conv1d_mb_same_dilation =
+  LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv1DLayer>,
+                           {
+                             "filters=2",
+                             "kernel_size=3",
+                             "padding=same",
+                             "dilation=2",
+                           },
+                           "3:3:1:11", "conv1d_mb_same_dilation.nnlayergolden",
+                           LayerGoldenTestParamOptions::DEFAULT);
+
 GTEST_PARAMETER_TEST(
   Convolution1D, LayerGoldenTest,
-  ::testing::Values(conv1d_sb_minimum, conv1d_mb_minimum, conv1d_sb_same_remain,
-                    conv1d_mb_same_remain, conv1d_sb_same_uneven_remain_1,
-                    conv1d_sb_same_uneven_remain_2,
-                    conv1d_mb_same_uneven_remain_1,
-                    conv1d_mb_same_uneven_remain_2, conv1d_sb_valid_drop_last,
-                    conv1d_mb_valid_drop_last, conv1d_sb_no_overlap,
-                    conv1d_mb_no_overlap, conv1d_sb_1x1_kernel,
-                    conv1d_mb_1x1_kernel));
+  ::testing::Values(
+    conv1d_sb_minimum, conv1d_mb_minimum, conv1d_sb_same_remain,
+    conv1d_mb_same_remain, conv1d_sb_same_uneven_remain_1,
+    conv1d_sb_same_uneven_remain_2, conv1d_mb_same_uneven_remain_1,
+    conv1d_mb_same_uneven_remain_2, conv1d_sb_valid_drop_last,
+    conv1d_mb_valid_drop_last, conv1d_sb_no_overlap, conv1d_mb_no_overlap,
+    conv1d_sb_1x1_kernel, conv1d_mb_1x1_kernel, conv1d_sb_dilation,
+    conv1d_mb_dilation, conv1d_sb_same_dilation, conv1d_mb_same_dilation));
index 927e3c0..0e0cfe4 100644 (file)
@@ -139,13 +139,55 @@ auto conv2d_mb_1x1_kernel =
                            "3:2:5:5", "conv2d_mb_1x1_kernel.nnlayergolden",
                            LayerGoldenTestParamOptions::DEFAULT);
 
+auto conv2d_sb_dilation =
+  LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+                           {
+                             "filters=2",
+                             "kernel_size=3,3",
+                             "dilation=2,2",
+                           },
+                           "1:3:11:11", "conv2d_sb_dilation.nnlayergolden",
+                           LayerGoldenTestParamOptions::DEFAULT);
+
+auto conv2d_mb_dilation =
+  LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+                           {
+                             "filters=2",
+                             "kernel_size=3,3",
+                             "dilation=2,2",
+                           },
+                           "3:3:11:11", "conv2d_mb_dilation.nnlayergolden",
+                           LayerGoldenTestParamOptions::DEFAULT);
+
+auto conv2d_sb_same_dilation =
+  LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+                           {
+                             "filters=2",
+                             "kernel_size=3,3",
+                             "padding=same",
+                             "dilation=2,2",
+                           },
+                           "1:3:11:11", "conv2d_sb_same_dilation.nnlayergolden",
+                           LayerGoldenTestParamOptions::DEFAULT);
+
+auto conv2d_mb_same_dilation =
+  LayerGoldenTestParamType(nntrainer::createLayer<nntrainer::Conv2DLayer>,
+                           {
+                             "filters=2",
+                             "kernel_size=3,3",
+                             "padding=same",
+                             "dilation=2,2",
+                           },
+                           "3:3:11:11", "conv2d_mb_same_dilation.nnlayergolden",
+                           LayerGoldenTestParamOptions::DEFAULT);
+
 GTEST_PARAMETER_TEST(
   Convolution2D, LayerGoldenTest,
-  ::testing::Values(conv2d_sb_minimum, conv2d_mb_minimum, conv2d_sb_same_remain,
-                    conv2d_mb_same_remain, conv2d_sb_same_uneven_remain_1,
-                    conv2d_sb_same_uneven_remain_2,
-                    conv2d_mb_same_uneven_remain_1,
-                    conv2d_mb_same_uneven_remain_2, conv2d_sb_valid_drop_last,
-                    conv2d_mb_valid_drop_last, conv2d_sb_no_overlap,
-                    conv2d_mb_no_overlap, conv2d_sb_1x1_kernel,
-                    conv2d_mb_1x1_kernel));
+  ::testing::Values(
+    conv2d_sb_minimum, conv2d_mb_minimum, conv2d_sb_same_remain,
+    conv2d_mb_same_remain, conv2d_sb_same_uneven_remain_1,
+    conv2d_sb_same_uneven_remain_2, conv2d_mb_same_uneven_remain_1,
+    conv2d_mb_same_uneven_remain_2, conv2d_sb_valid_drop_last,
+    conv2d_mb_valid_drop_last, conv2d_sb_no_overlap, conv2d_mb_no_overlap,
+    conv2d_sb_1x1_kernel, conv2d_mb_1x1_kernel, conv2d_sb_dilation,
+    conv2d_mb_dilation, conv2d_sb_same_dilation, conv2d_mb_same_dilation));
index a5c9d19..acf1f75 100644 (file)
@@ -180,18 +180,18 @@ TEST(Padding2D, setPropertyValid_p) {
   EXPECT_NO_THROW(p.set("Same"));
   EXPECT_EQ(p.get(), "Same");
 
-  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}),
+  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}, {1, 1}),
             (std::array<unsigned int, 4>({1, 1, 1, 1})));
 
   EXPECT_NO_THROW(p.set("valid"));
   EXPECT_EQ(p.get(), "valid");
 
-  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}),
+  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}, {1, 1}),
             (std::array<unsigned int, 4>({0, 0, 0, 0})));
 
   EXPECT_NO_THROW(p.set("1"));
   EXPECT_EQ(p.get(), "1");
-  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}),
+  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}, {1, 1}),
             (std::array<unsigned int, 4>({1, 1, 1, 1})));
 
   EXPECT_NO_THROW(p.set("0"));
@@ -199,12 +199,12 @@ TEST(Padding2D, setPropertyValid_p) {
 
   EXPECT_NO_THROW(p.set("1, 2"));
   EXPECT_EQ(p.get(), "1, 2");
-  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}),
+  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}, {1, 1}),
             (std::array<unsigned int, 4>({1, 1, 2, 2})));
 
   EXPECT_NO_THROW(p.set("1, 2, 3, 4"));
   EXPECT_EQ(p.get(), "1, 2, 3, 4");
-  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}),
+  EXPECT_EQ(p.compute({32, 32}, {3, 3}, {1, 1}, {1, 1}),
             (std::array<unsigned int, 4>({1, 2, 3, 4})));
 }