[enco] Infer Output Shape of Convolution with group (#1501)
author Jonghyun Park / Motion Control Lab (SR) / Staff Engineer / Samsung Electronics <jh1302.park@samsung.com>
Mon, 17 Sep 2018 02:03:28 +0000 (11:03 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Mon, 17 Sep 2018 02:03:28 +0000 (11:03 +0900)
This commit extends ConvolutionSpec to handle a non-default group parameter.
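
With a non-default group, the input channels are split into `group' independent
slices, so only the kernel's channel dimension shrinks: the inferred kernel
shape is [num_output, ifm_channels / group, kernel spatial dims...], while the
output channel count remains num_output. As a worked example matching the
dconv test added below (and assuming Caffe's defaults of stride 1 and zero
padding): an ifm of 1x3x16x16 with num_output 3, kernel_size 3, and group 3
gives a kernel shape of [3, 1, 3, 3] and an output shape of [1, 3, 14, 14],
since 16 - 3 + 1 = 14.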

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
contrib/enco/frontend/caffe/src/ConvolutionSpec.cpp
contrib/enco/frontend/caffe/src/ConvolutionSpec.h
contrib/enco/frontend/caffe/src/ConvolutionSpec.test.cpp

index ae38ed5..cc6dcb4 100644 (file)
@@ -8,12 +8,10 @@ ConvolutionSpec::ConvolutionSpec(const ::caffe::ConvolutionParameter &param) : _
   // NOTE Dilation is not supported, yet
   // TODO Support dilation
   assert(param.dilation().size() == 0);
-
-  // NOTE Group convolution is not supported, yet
-  // TODO Support group convolution
-  assert(param.group() == 1);
 }
 
+uint32_t ConvolutionSpec::group(void) const { return _param.group(); }
+
 uint32_t ConvolutionSpec::channel_axis(void) const
 {
   return query_on(ifm_shape()).axis(axis_specifier(_param.axis()));
@@ -112,7 +110,8 @@ nncc::core::ADT::tensor::Shape ConvolutionSpec::ker_shape(void) const
   res.resize(2 + num_spatial_axes());
 
   res.dim(0) = ker_count();
-  res.dim(1) = ifm_dim(channel_axis());
+  assert(ifm_dim(channel_axis()) % group() == 0);
+  res.dim(1) = ifm_dim(channel_axis()) / group();
   for (uint32_t axis = 0; axis < num_spatial_axes(); ++axis)
   {
     res.dim(2 + axis) = ker_dim(axis);
index b584411..5df3bd7 100644 (file)
@@ -14,6 +14,8 @@ public:
   uint32_t ifm_rank(void) const { return _ifm_shape.rank(); }
   uint32_t ifm_dim(uint32_t axis) const { return _ifm_shape.dim(axis); }
 
+  uint32_t group(void) const;
+
   uint32_t channel_axis(void) const;
 
   uint32_t num_batch_axes(void) const { return channel_axis(); }
index 8599d6c..9a25e19 100644 (file)
@@ -332,3 +332,61 @@ TEST_F(ConvolutionSpecTest, conv_ker_hw)
     ASSERT_EQ(expected, obtained);
   }
 }
+
+namespace
+{
+// clang-format off
+const char *dconv = STRING(
+layer {
+  name: "data"
+  type: "Input"
+  top: "data"
+  input_param {
+    shape: { dim: 1 dim: 3 dim: 16 dim: 16 }
+  }
+}
+layer {
+  name: "conv"
+  type: "Convolution"
+  bottom: "data"
+  top: "conv"
+  convolution_param {
+    bias_term: false
+    num_output: 3
+    kernel_size: 3
+    group: 3
+  }
+}
+);
+// clang-format on
+} // namespace
+
+TEST_F(ConvolutionSpecTest, dconv)
+{
+  ::caffe::NetParameter param;
+
+  ASSERT_TRUE(load(dconv, param));
+
+  ::caffe::Net<float> net{param};
+
+  const tensor::Shape ifm_shape{1, 3, 16, 16};
+  ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+  spec.ifm_shape(ifm_shape);
+
+  // Check 'ker_shape'
+  {
+    auto expected = as_tensor_shape(net.layer_by_name("conv")->blobs().at(0)->shape());
+    auto obtained = spec.ker_shape();
+
+    ASSERT_EQ(expected, obtained);
+  }
+
+  // Check 'ofm_shape'
+  {
+    auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+    auto obtained = spec.ofm_shape();
+
+    ASSERT_EQ(expected, obtained);
+  }
+}