This commit extends ConvolutionSpec to handle a non-default 'group' parameter.
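
With a non-default 'group', the input channels are split into 'group' slices and each
kernel spans only (ifm channels / group) of them, so the kernel shape becomes
num_output x (ifm channels / group) x kH x kW. For the 'dconv' test added here
(3 input channels, num_output: 3, kernel_size: 3, group: 3), this yields a
3 x 1 x 3 x 3 kernel.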
Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
// NOTE Dilation is not supported, yet
// TODO Support dilation
assert(param.dilation().size() == 0);
-
- // NOTE Group convolution is not supported, yet
- // TODO Support group convolution
- assert(param.group() == 1);
}
+uint32_t ConvolutionSpec::group(void) const { return _param.group(); }
+
uint32_t ConvolutionSpec::channel_axis(void) const
{
return query_on(ifm_shape()).axis(axis_specifier(_param.axis()));
res.resize(2 + num_spatial_axes());
res.dim(0) = ker_count();
- res.dim(1) = ifm_dim(channel_axis());
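+  // NOTE With grouping, each kernel spans only (ifm channels / group) input channels,
+  //      so the input channel count is expected to be divisible by 'group'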
+ assert(ifm_dim(channel_axis()) % group() == 0);
+ res.dim(1) = ifm_dim(channel_axis()) / group();
for (uint32_t axis = 0; axis < num_spatial_axes(); ++axis)
{
res.dim(2 + axis) = ker_dim(axis);
uint32_t ifm_rank(void) const { return _ifm_shape.rank(); }
uint32_t ifm_dim(uint32_t axis) const { return _ifm_shape.dim(axis); }
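+  // Returns the 'group' parameter of the convolution (1 means standard, ungrouped convolution)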
+ uint32_t group(void) const;
+
uint32_t channel_axis(void) const;
uint32_t num_batch_axes(void) const { return channel_axis(); }
ASSERT_EQ(expected, obtained);
}
}
+
+namespace
+{
+// clang-format off
+const char *dconv = STRING(
+layer {
+ name: "data"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape: { dim: 1 dim: 3 dim: 16 dim: 16 }
+ }
+}
+layer {
+ name: "conv"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv"
+ convolution_param {
+ bias_term: false
+ num_output: 3
+ kernel_size: 3
+ group: 3
+ }
+}
+);
+// clang-format on
+} // namespace
+
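+// NOTE In 'dconv' above, group == num_output == ifm channels (= 3), which describes a
+//      depthwise convolution: each kernel spans a single input channel, so the expected
+//      kernel shape is {3, 1, 3, 3}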
+TEST_F(ConvolutionSpecTest, dconv)
+{
+ ::caffe::NetParameter param;
+
+ ASSERT_TRUE(load(dconv, param));
+
+ ::caffe::Net<float> net{param};
+
+ const tensor::Shape ifm_shape{1, 3, 16, 16};
+ ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+ spec.ifm_shape(ifm_shape);
+
+ // Check 'ker_shape'
+ {
+ auto expected = as_tensor_shape(net.layer_by_name("conv")->blobs().at(0)->shape());
+ auto obtained = spec.ker_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+
+ // Check 'ofm_shape'
+ {
+ auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+ auto obtained = spec.ofm_shape();
+
+ ASSERT_EQ(expected, obtained);
+ }
+}