// Number of dimensions in this shape. _dims.size() is std::size_t; the cast
// makes the narrowing to uint32_t explicit instead of an implicit conversion.
uint32_t rank(void) const { return static_cast<uint32_t>(_dims.size()); }
public:
- BlobShape &resize(uint32_t size) { _dims.resize(size); }
+ BlobShape &resize(uint32_t size)
+ {
+ _dims.resize(size);
+ return (*this);
+ }
public:
// Extent of axis `axe`; std::out_of_range is thrown for an invalid axis
// (bounds-checked access via at()).
int64_t dim(uint32_t axe) const
{
  return _dims.at(axe);
}
// Mutable access to the wrapped caffe layer parameter.
// Fixes mojibake: `&param` was HTML-entity-decoded into `¶m` (U+00B6),
// which is not valid C++.
caffe::LayerParameter &param(void) { return *_param; }
private:
// Non-owning pointer to the layer's prototxt parameter; the pointer itself is
// immutable after construction. (Resolves diff residue: the stale `-`-marked
// `_net` member, no longer initialized by any constructor, is removed.)
caffe::LayerParameter * const _param;
};
res.dim(num_batch_axes()) = num_effective_output();
- for (int spatial_axis = 0; spatial_axis < num_spatial_axes(); ++spatial_axis)
+ for (uint32_t spatial_axis = 0; spatial_axis < num_spatial_axes(); ++spatial_axis)
{
const uint32_t axis = num_batch_axes() + 1 + spatial_axis;
const int64_t kernel_ext =
return input_shape().rank() - num_spatial_axes() - 1;
}
-uint32_t ConvolutionLayer::pad(uint32_t spatial_axis) const
+uint32_t ConvolutionLayer::pad(uint32_t /*spatial_axis*/) const
{
return conv_param().pad(0);
}
-uint32_t ConvolutionLayer::kernel_size(uint32_t spatial_axis) const
+uint32_t ConvolutionLayer::kernel_size(uint32_t /*spatial_axis*/) const
{
return conv_param().kernel_size(0);
}
-uint32_t ConvolutionLayer::stride(uint32_t spatial_axis) const
+uint32_t ConvolutionLayer::stride(uint32_t /*spatial_axis*/) const
{
return conv_param().stride(0);
}
-uint32_t ConvolutionLayer::dilation(uint32_t spatial_axis) const
+uint32_t ConvolutionLayer::dilation(uint32_t /*spatial_axis*/) const
{
return 1;
}
#include <cassert>
-InputLayer::InputLayer(const Network *net, caffe::LayerParameter *p) : _net{net}, _param{p}
+InputLayer::InputLayer(const Network *, caffe::LayerParameter *p) : _param{p}
{
assert(_param != nullptr);
assert(param().type() == "Input");
/// An Input layer reads no bottom blobs, so its bottom count is always zero.
uint32_t InputLayer::bottom_size(void) const
{
  return 0;
}
-const std::string &InputLayer::bottom_name(uint32_t n) const
+const std::string &InputLayer::bottom_name(uint32_t) const
{
throw std::invalid_argument{"n"};
}
-const BlobShape &InputLayer::bottom_shape(uint32_t n) const
+const BlobShape &InputLayer::bottom_shape(uint32_t) const
{
throw std::invalid_argument{"n"};
}
// Take ownership of `l` and append it to the layer list.
// Returns *this for call chaining. (Resolves diff residue: without the
// `return`, control fell off the end of a value-returning function — UB.)
LayerContext &LayerContext::append(std::unique_ptr<Layer> &&l)
{
  _layers.emplace_back(std::move(l));
  return (*this);
}
res->layers().append(std::move(l));
}
- return std::move(res);
+ return res;
}
weight_shape->add_dim(l.num_effective_output());
weight_shape->add_dim(l.input_shape().dim(l.channel_axis()));
- for (int spatial_axis = 0; spatial_axis < l.num_spatial_axes(); ++spatial_axis)
+ for (uint32_t spatial_axis = 0; spatial_axis < l.num_spatial_axes(); ++spatial_axis)
{
const auto kernel_dim = l.kernel_size(spatial_axis);
weight_shape->add_dim(kernel_dim);