--- /dev/null
+#ifndef __CONVOLUTION_LAYER_H__
+#define __CONVOLUTION_LAYER_H__
+
+#include "Layer.h"
+#include "Network.h"
+
+#include "BlobShape.h"
+
+#include <caffe.pb.h>
+
+class ConvolutionLayer final : public Layer
+{
+public:
+ ConvolutionLayer(const Network *net, caffe::LayerParameter *p);
+
+public:
+ uint32_t bottom_size(void) const override;
+ const std::string &bottom_name(uint32_t n) const override;
+ const BlobShape &bottom_shape(uint32_t n) const override;
+
+public:
+ uint32_t top_size(void) const override;
+ const std::string &top_name(uint32_t n) const override;
+ BlobShape top_shape(uint32_t n) const override;
+
+public:
+ void accept(LayerAnalysisPass &&) const override;
+ void accept(LayerTransformPass &&) override;
+
+public:
+ const caffe::LayerParameter ¶m(void) const { return *_param; }
+ caffe::LayerParameter ¶m(void) { return *_param; }
+
+public:
+ caffe::ConvolutionParameter &conv_param(void)
+ {
+ return *param().mutable_convolution_param();
+ }
+
+ const caffe::ConvolutionParameter &conv_param(void) const
+ {
+ return param().convolution_param();
+ }
+
+public:
+ const std::string &input_name(void) const;
+ const BlobShape &input_shape(void) const;
+
+public:
+ const std::string &output_name(void) const;
+ BlobShape output_shape(void) const;
+
+public:
+ uint32_t channel_axis(void) const;
+ uint32_t num_effective_output(void) const;
+
+public:
+ uint32_t num_spatial_axes(void) const;
+ uint32_t num_batch_axes(void) const;
+
+public:
+ uint32_t pad(uint32_t spatial_axe) const;
+ uint32_t kernel_size(uint32_t spatial_axe) const;
+ uint32_t stride(uint32_t spatial_axe) const;
+ uint32_t dilation(uint32_t spatial_axe) const;
+
+private:
+ const Network * const _net;
+ caffe::LayerParameter * const _param;
+};
+
+#endif // __CONVOLUTION_LAYER_H__
--- /dev/null
+#include "ConvolutionLayer.h"
+#include "LayerAnalysisPass.h"
+#include "LayerTransformPass.h"
+
+#include <cassert>
+
// Wraps an existing "Convolution" LayerParameter.
//
// NOTE 'net' and 'p' are borrowed, not owned; they must outlive this object.
ConvolutionLayer::ConvolutionLayer(const Network *net, caffe::LayerParameter *p)
    : _net{net}, _param{p}
{
  assert(param().type() == "Convolution");
  assert(param().mutable_convolution_param() != nullptr);

  // These constraints come from Convolution layer's definition
  assert(param().bottom_size() == 1);
  assert(param().top_size() == 1);
  assert(num_batch_axes() + 1 /*channel axis*/ + num_spatial_axes() == input_shape().rank());

  // TODO Support force_nd_im2col option
  assert(!conv_param().force_nd_im2col());
  // TODO Support negative axis
  assert(conv_param().axis() > 0);
  // TODO Support multi-group convolution
  assert(conv_param().group() == 1);

  // Comment on ConvolutionParameter (in caffe.proto)
  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in all spatial dimensions, or once per spatial dimension.
  //
  // NOTE 'equal dimensions in all spatial dimensions' schema is supported
  // TODO Support 'once per spatial dimension'
  assert(conv_param().pad_size() == 1);
  assert(conv_param().kernel_size_size() == 1);
  assert(conv_param().stride_size() == 1);

  // NOTE 'dilation' is not supported yet
  // TODO Support 'dilation'
  assert(conv_param().dilation_size() == 0);
}
+
+uint32_t ConvolutionLayer::bottom_size(void) const { return 1; }
+
+const std::string &ConvolutionLayer::bottom_name(uint32_t n) const
+{
+ assert(n == 0);
+ return input_name();
+}
+
+const BlobShape &ConvolutionLayer::bottom_shape(uint32_t n) const
+{
+ assert(n == 0);
+ return input_shape();
+}
+
+uint32_t ConvolutionLayer::top_size(void) const { return 1; }
+
+const std::string &ConvolutionLayer::top_name(uint32_t n) const
+{
+ assert(n == 0);
+ return output_name();
+}
+
+BlobShape ConvolutionLayer::top_shape(uint32_t n) const
+{
+ assert(n == 0);
+ return output_shape();
+}
+
+void ConvolutionLayer::accept(LayerAnalysisPass &&v) const { v.visit(*this); }
+void ConvolutionLayer::accept(LayerTransformPass &&v) { v.visit(*this); }
+
+const std::string &ConvolutionLayer::input_name(void) const { return param().bottom(0); }
+const BlobShape &ConvolutionLayer::input_shape(void) const
+{
+ return _net->blobs().at(input_name());
+}
+
+const std::string &ConvolutionLayer::output_name(void) const { return param().top(0); }
+BlobShape ConvolutionLayer::output_shape(void) const
+{
+ // The code below is derived from Caffe
+ // - Please refer to 'compute_output_shape' method in 'caffe::ConvolutionLayer' for details
+ BlobShape res{};
+
+ res.resize(num_batch_axes() + 1 + num_spatial_axes());
+
+ for (uint32_t batch_axis = 0; batch_axis < num_batch_axes(); ++batch_axis)
+ {
+ res.dim(batch_axis) = input_shape().dim(batch_axis);
+ }
+
+ res.dim(num_batch_axes()) = num_effective_output();
+
+ for (int spatial_axis = 0; spatial_axis < num_spatial_axes(); ++spatial_axis)
+ {
+ const uint32_t axis = num_batch_axes() + 1 + spatial_axis;
+ const int64_t kernel_ext =
+ dilation(spatial_axis) * (kernel_size(spatial_axis) - 1) + 1;
+
+ res.dim(axis) =
+ (input_shape().dim(axis) + 2 * pad(spatial_axis) - kernel_ext) / stride(spatial_axis);
+ }
+
+ return res;
+}
+
+uint32_t ConvolutionLayer::channel_axis(void) const
+{
+ return conv_param().axis();
+}
+
+uint32_t ConvolutionLayer::num_effective_output(void) const
+{
+ return conv_param().num_output();
+}
+
+uint32_t ConvolutionLayer::num_spatial_axes(void) const
+{
+ assert(input_shape().rank() > channel_axis());
+ return input_shape().rank() - channel_axis() - 1;
+}
+
+uint32_t ConvolutionLayer::num_batch_axes(void) const
+{
+ return input_shape().rank() - num_spatial_axes() - 1;
+}
+
+uint32_t ConvolutionLayer::pad(uint32_t spatial_axis) const
+{
+ return conv_param().pad(0);
+}
+
+uint32_t ConvolutionLayer::kernel_size(uint32_t spatial_axis) const
+{
+ return conv_param().kernel_size(0);
+}
+
+uint32_t ConvolutionLayer::stride(uint32_t spatial_axis) const
+{
+ return conv_param().stride(0);
+}
+
+uint32_t ConvolutionLayer::dilation(uint32_t spatial_axis) const
+{
+ return 1;
+}