[enco] Read pad from ConvolutionParameter (#1268)
author박종현/동작제어Lab(SR)/Staff Engineer/삼성전자 <jh1302.park@samsung.com>
Fri, 31 Aug 2018 06:24:30 +0000 (15:24 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Fri, 31 Aug 2018 06:24:30 +0000 (15:24 +0900)
* [enco] Read pad from ConvolutionParameter

This commit revises ConvolutionSpec to read pad values from
ConvolutionParameter.

Note that the 'pad_h' and 'pad_w' parameters are not yet supported.

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
* Fix typos (stride_h -> pad_h, stride_w -> pad_w)

contrib/enco/frontend/caffe/src/ConvolutionSpec.cpp
contrib/enco/frontend/caffe/src/ConvolutionSpec.h
contrib/enco/frontend/caffe/src/ConvolutionSpec.test.cpp

index 59c6a18..3c5e26b 100644 (file)
@@ -41,12 +41,6 @@ CanonicalChannelAxis canonical_channel_axis(int32_t axis) { return CanonicalChan
 ConvolutionSpec::ConvolutionSpec(const ::caffe::ConvolutionParameter &param)
     : _param(param), _num_output{0}
 {
-  // NOTE Padding is not supported, yet
-  // TODO Support padding
-  assert(param.pad().size() == 0);
-  assert(param.pad_h() == 0);
-  assert(param.pad_w() == 0);
-
   // NOTE Dilation is not supported, yet
   // TODO Support dilation
   assert(param.dilation().size() == 0);
@@ -61,6 +55,29 @@ uint32_t ConvolutionSpec::channel_axis(void) const
   return canonical_channel_axis(_param.axis()).eval(ifm_shape());
 }
 
+uint32_t ConvolutionSpec::pad(uint32_t spatial_axis) const
+{
+  assert(spatial_axis < num_spatial_axes());
+
+  // TODO Support pad_h/pad_w parameters
+  assert(!_param.has_pad_h());
+  assert(!_param.has_pad_w());
+
+  if (_param.pad().size() == 0)
+  {
+    // NOTE default pad is 0
+    return 0;
+  }
+
+  if (_param.pad().size() == 1)
+  {
+    return _param.pad(0);
+  }
+
+  assert(_param.pad().size() == num_spatial_axes());
+  return _param.pad(spatial_axis);
+}
+
 uint32_t ConvolutionSpec::stride(uint32_t spatial_axis) const
 {
   assert(spatial_axis < num_spatial_axes());
@@ -140,7 +157,7 @@ nncc::core::ADT::tensor::Shape ConvolutionSpec::ofm_shape(void) const
 
     uint32_t dim = 0;
 
-    dim += ifm_dim(full_axis) - ker_dim(spatial_axis);
+    dim += ifm_dim(full_axis) - ker_dim(spatial_axis) + 2 * pad(spatial_axis);
     dim /= stride(spatial_axis);
     dim += 1;
 
index 6c6e288..d143217 100644 (file)
@@ -19,6 +19,7 @@ public:
   uint32_t num_batch_axes(void) const { return channel_axis(); }
   uint32_t num_spatial_axes(void) const { return ifm_rank() - channel_axis() - 1; }
 
+  uint32_t pad(uint32_t spatial_axis) const;
   uint32_t stride(uint32_t spatial_axis) const;
   uint32_t ker_dim(uint32_t spatial_axis) const;
 
index ffd399c..a59634b 100644 (file)
@@ -224,3 +224,57 @@ TEST_F(ConvolutionSpecTest, conv_2)
     ASSERT_EQ(expected, obtained);
   }
 }
+
+namespace
+{
+// clang-format off
+const char *conv_pad = STRING(
+layer {
+  name: "data"
+  type: "Input"
+  top: "data"
+  input_param {
+    shape: { dim: 1 dim: 3 dim: 16 dim: 16 }
+  }
+}
+layer {
+  name: "conv"
+  type: "Convolution"
+  bottom: "data"
+  top: "conv"
+  convolution_param {
+    bias_term: false
+    num_output: 2
+    pad: 2
+    kernel_size: 3
+  }
+}
+);
+// clang-format on
+} // namespace
+
+TEST_F(ConvolutionSpecTest, conv_pad)
+{
+  ::caffe::NetParameter param;
+
+  ASSERT_TRUE(load(conv_pad, param));
+
+  ::caffe::Net<float> net{param};
+
+  const tensor::Shape ifm_shape{1, 3, 16, 16};
+  ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+  spec.ifm_shape(ifm_shape);
+
+  // Check 'pad'
+  ASSERT_EQ(spec.pad(0), 2);
+  ASSERT_EQ(spec.pad(1), 2);
+
+  // Check 'ofm_shape'
+  {
+    auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+    auto obtained = spec.ofm_shape();
+
+    ASSERT_EQ(expected, obtained);
+  }
+}