#include "LayerResolver.h"

#include "InputLayer.h"
#include "ConvolutionLayer.h"

#include <nncc/foundation/Memory.h>

#include <cassert>
// Registers a factory for every layer type this resolver knows how to build.
// NOTE The stray '+' diff marker on the "Convolution" line was removed — it
//      was patch residue and would not compile.
LayerResolver::LayerResolver()
{
  _factories["Input"] = make_factory<InputLayer>();
  _factories["Convolution"] = make_factory<ConvolutionLayer>();
}
// Looks up the factory registered for 'type' (e.g. "Input", "Convolution").
//
// The original text here was garbled patch residue: a function declared to
// return 'const LayerFactory &' was followed by a bare 'return;' with no
// opening brace. Reconstructed as the obvious map lookup.
//
// NOTE(review): assumes _factories maps type names to owning pointers
// (make_factory<...>() presumably returns a smart pointer) — confirm against
// the member declaration in LayerResolver.h.
const LayerFactory &LayerResolver::resolve(const std::string &type) const
{
  auto it = _factories.find(type);

  // TODO Turn this into a proper error for unknown layer types
  assert(it != _factories.end());

  return *(it->second);
}
-void ParameterRandomizePass::visit(ConvolutionLayer &)
+void ParameterRandomizePass::visit(ConvolutionLayer &l)
{
- throw std::runtime_error{"Not supported, yet"};
+ assert(l.param().blobs_size() == 0);
+
+ caffe::ConvolutionParameter *conv_param = l.param().mutable_convolution_param();
+
+ auto element_count = [] (caffe::BlobShape &shape)
+ {
+ assert(shape.dim_size() > 0);
+
+ int64_t count = 1;
+
+ for (int axis = 0; axis < shape.dim_size(); ++axis)
+ {
+ count *= shape.dim(axis);
+ }
+
+ return count;
+ };
+
+ //
+ // Fill Kernel
+ //
+ caffe::BlobProto *weight_blob = l.param().add_blobs();
+ caffe::BlobShape *weight_shape = weight_blob->mutable_shape();
+
+ weight_shape->add_dim(l.num_effective_output());
+ weight_shape->add_dim(l.input_shape().dim(l.channel_axis()));
+
+ for (int spatial_axis = 0; spatial_axis < l.num_spatial_axes(); ++spatial_axis)
+ {
+ const auto kernel_dim = l.kernel_size(spatial_axis);
+ weight_shape->add_dim(kernel_dim);
+ }
+
+ // TODO Allow users to set mean and stddev
+ std::normal_distribution<float> weight_distribution(0.0f, 2.0f);
+
+ for (int64_t n = 0; n < element_count(*weight_shape); ++n)
+ {
+ weight_blob->add_data(weight_distribution(_generator));
+ }
+
+ //
+ // Fill Bias
+ //
+ assert(conv_param->bias_term());
+ caffe::BlobProto *bias_blob = l.param().add_blobs();
+ caffe::BlobShape *bias_shape = bias_blob->mutable_shape();
+
+ bias_shape->add_dim(l.num_effective_output());
+
+ // TODO Allow users to set mean and stddev
+ std::normal_distribution<float> bias_distribution(0.0f, 16.0f);
+
+ for (int64_t n = 0; n < element_count(*bias_shape); ++n)
+ {
+ bias_blob->add_data(bias_distribution(_generator));
+ }
}