ConvolutionSpec::ConvolutionSpec(const ::caffe::ConvolutionParameter &param)
: _param(param), _num_output{0}
{
- // NOTE Padding is not supported, yet
- // TODO Support padding
- assert(param.pad().size() == 0);
- assert(param.pad_h() == 0);
- assert(param.pad_w() == 0);
-
// NOTE Dilation is not supported, yet
// TODO Support dilation
assert(param.dilation().size() == 0);
return canonical_channel_axis(_param.axis()).eval(ifm_shape());
}
+// Returns the pad amount for the given spatial axis.
+//
+// Resolution order (follows the repeated 'pad' field of Caffe's
+// ConvolutionParameter):
+//   - no 'pad' entry      -> 0 (default)
+//   - exactly one entry   -> shared by every spatial axis
+//   - otherwise           -> one entry per spatial axis
+uint32_t ConvolutionSpec::pad(uint32_t spatial_axis) const
+{
+  assert(spatial_axis < num_spatial_axes());
+
+  // TODO Support pad_h/pad_w parameters
+  assert(!_param.has_pad_h());
+  assert(!_param.has_pad_w());
+
+  if (_param.pad().size() == 0)
+  {
+    // NOTE default pad is 0
+    return 0;
+  }
+
+  if (_param.pad().size() == 1)
+  {
+    // A single 'pad' value is broadcast across all spatial axes
+    return _param.pad(0);
+  }
+
+  // Otherwise 'pad' must specify exactly one value per spatial axis
+  assert(_param.pad().size() == num_spatial_axes());
+  return _param.pad(spatial_axis);
+}
+
uint32_t ConvolutionSpec::stride(uint32_t spatial_axis) const
{
  assert(spatial_axis < num_spatial_axes());
  // NOTE(review): a diff hunk is elided here — the lines below belong to
  // the output-dimension computation, not to stride()'s body.
  uint32_t dim = 0;
  // Account for symmetric padding: output dim = (I - K + 2*P) / S + 1
- dim += ifm_dim(full_axis) - ker_dim(spatial_axis);
+ dim += ifm_dim(full_axis) - ker_dim(spatial_axis) + 2 * pad(spatial_axis);
  dim /= stride(spatial_axis);
  dim += 1;
uint32_t num_batch_axes(void) const { return channel_axis(); }
uint32_t num_spatial_axes(void) const { return ifm_rank() - channel_axis() - 1; }
// Per-spatial-axis symmetric padding; defaults to 0 when unspecified
+ uint32_t pad(uint32_t spatial_axis) const;
uint32_t stride(uint32_t spatial_axis) const;
uint32_t ker_dim(uint32_t spatial_axis) const;
ASSERT_EQ(expected, obtained);
}
}
+
+namespace
+{
+// Minimal two-layer network for the 'conv_pad' test: a 1x3x16x16 input
+// feeding a 3x3 convolution with a single shared 'pad' value of 2.
+// clang-format off
+const char *conv_pad = STRING(
+layer {
+ name: "data"
+ type: "Input"
+ top: "data"
+ input_param {
+ shape: { dim: 1 dim: 3 dim: 16 dim: 16 }
+ }
+}
+layer {
+ name: "conv"
+ type: "Convolution"
+ bottom: "data"
+ top: "conv"
+ convolution_param {
+ bias_term: false
+ num_output: 2
+ pad: 2
+ kernel_size: 3
+ }
+}
+);
+// clang-format on
+} // namespace
+
+// Checks ConvolutionSpec::pad() against the 'conv_pad' network fixture:
+// the single 'pad: 2' entry must be shared by both spatial axes, and the
+// computed output shape must match the shape Caffe itself produces.
+TEST_F(ConvolutionSpecTest, conv_pad)
+{
+  ::caffe::NetParameter param;
+
+  ASSERT_TRUE(load(conv_pad, param));
+
+  // Run the real Caffe net to obtain the reference ('expected') shape
+  ::caffe::Net<float> net{param};
+
+  const tensor::Shape ifm_shape{1, 3, 16, 16};
+  ConvolutionSpec spec{param.layer(1).convolution_param()};
+
+  spec.ifm_shape(ifm_shape);
+
+  // Check 'pad'
+  ASSERT_EQ(spec.pad(0), 2);
+  ASSERT_EQ(spec.pad(1), 2);
+
+  // Check 'ofm_shape'
+  {
+    auto expected = as_tensor_shape(net.blob_by_name("conv")->shape());
+    auto obtained = spec.ofm_shape();
+
+    ASSERT_EQ(expected, obtained);
+  }
+}