file(GLOB_RECURSE SOURCES "src/*.cpp")
add_executable(caffegen ${SOURCES})
-target_include_directories(caffegen PRIVATE include)
target_link_libraries(caffegen stdex)
target_link_libraries(caffegen cli)
target_link_libraries(caffegen caffeproto)
+++ /dev/null
-#ifndef __BLOB_CONTEXT_H__
-#define __BLOB_CONTEXT_H__
-
-#include "BlobShape.h"
-
-#include <string>
-#include <map>
-
-class BlobContext
-{
-public:
- const BlobShape &at(const std::string &name) const;
-
-public:
- BlobContext &insert(const std::string &name, const BlobShape &shape);
-
-private:
- std::map<std::string, BlobShape> _shapes;
-};
-
-#endif // __BLOB_CONTEXT_H__
+++ /dev/null
-#ifndef __BLOB_SHAPE_H__
-#define __BLOB_SHAPE_H__
-
-#include <vector>
-#include <cstdint>
-
-class BlobShape
-{
-public:
- uint32_t rank(void) const { return _dims.size(); }
-
-public:
- BlobShape &resize(uint32_t size)
- {
- _dims.resize(size);
- return (*this);
- }
-
-public:
- int64_t dim(uint32_t axe) const { return _dims.at(axe); }
- int64_t &dim(uint32_t axe) { return _dims.at(axe); }
-
-private:
- std::vector<int64_t> _dims;
-};
-
-#endif // __BLOB_SHAPE_H__
+++ /dev/null
-#ifndef __CONVOLUTION_LAYER_H__
-#define __CONVOLUTION_LAYER_H__
-
-#include "Layer.h"
-#include "Network.h"
-
-#include "BlobShape.h"
-
-#include <caffe/proto/caffe.pb.h>
-
-class ConvolutionLayer final : public Layer
-{
-public:
- ConvolutionLayer(const Network *net, caffe::LayerParameter *p);
-
-public:
- uint32_t bottom_size(void) const override;
- const std::string &bottom_name(uint32_t n) const override;
- const BlobShape &bottom_shape(uint32_t n) const override;
-
-public:
- uint32_t top_size(void) const override;
- const std::string &top_name(uint32_t n) const override;
- BlobShape top_shape(uint32_t n) const override;
-
-public:
- void accept(LayerAnalysisPass &&) const override;
- void accept(LayerTransformPass &&) override;
-
-public:
- const caffe::LayerParameter &param(void) const { return *_param; }
- caffe::LayerParameter &param(void) { return *_param; }
-
-public:
- caffe::ConvolutionParameter &conv_param(void) { return *param().mutable_convolution_param(); }
-
- const caffe::ConvolutionParameter &conv_param(void) const { return param().convolution_param(); }
-
-public:
- const std::string &input_name(void) const;
- const BlobShape &input_shape(void) const;
-
-public:
- const std::string &output_name(void) const;
- BlobShape output_shape(void) const;
-
-public:
- uint32_t channel_axis(void) const;
- uint32_t num_effective_output(void) const;
-
-public:
- uint32_t num_spatial_axes(void) const;
- uint32_t num_batch_axes(void) const;
-
-public:
- uint32_t pad(uint32_t spatial_axe) const;
- uint32_t kernel_size(uint32_t spatial_axe) const;
- uint32_t stride(uint32_t spatial_axe) const;
- uint32_t dilation(uint32_t spatial_axe) const;
-
-private:
- const Network *const _net;
- caffe::LayerParameter *const _param;
-};
-
-#endif // __CONVOLUTION_LAYER_H__
+++ /dev/null
-#ifndef __INPUT_LAYER_H__
-#define __INPUT_LAYER_H__
-
-#include "Layer.h"
-#include "Network.h"
-
-#include "BlobShape.h"
-
-#include <caffe/proto/caffe.pb.h>
-
-class InputLayer final : public Layer
-{
-public:
- InputLayer(const Network *net, caffe::LayerParameter *p);
-
-public:
- uint32_t bottom_size(void) const override;
- const std::string &bottom_name(uint32_t n) const override;
- const BlobShape &bottom_shape(uint32_t n) const override;
-
-public:
- uint32_t top_size(void) const override;
- const std::string &top_name(uint32_t n) const override;
- BlobShape top_shape(uint32_t n) const override;
-
-public:
- void accept(LayerAnalysisPass &&) const override;
- void accept(LayerTransformPass &&) override;
-
-public:
- const caffe::LayerParameter &param(void) const { return *_param; }
- caffe::LayerParameter &param(void) { return *_param; }
-
-private:
- caffe::LayerParameter *const _param;
-};
-
-#endif // __INPUT_LAYER_H__
+++ /dev/null
-#ifndef __LAYER_H__
-#define __LAYER_H__
-
-#include "BlobShape.h"
-
-#include <string>
-
-struct LayerAnalysisPass;
-struct LayerTransformPass;
-
-struct Layer
-{
- virtual ~Layer() = default;
-
- virtual uint32_t bottom_size(void) const = 0;
- virtual const std::string &bottom_name(uint32_t n) const = 0;
- virtual const BlobShape &bottom_shape(uint32_t n) const = 0;
-
- virtual uint32_t top_size(void) const = 0;
- virtual const std::string &top_name(uint32_t n) const = 0;
- virtual BlobShape top_shape(uint32_t n) const = 0;
-
- virtual void accept(LayerAnalysisPass &&) const = 0;
- virtual void accept(LayerTransformPass &&) = 0;
-};
-
-#endif // __LAYER_H__
+++ /dev/null
-#ifndef __LAYER_ANALYSIS_PASS_H__
-#define __LAYER_ANALYSIS_PASS_H__
-
-#include "InputLayer.h"
-#include "ConvolutionLayer.h"
-
-struct LayerAnalysisPass
-{
- virtual ~LayerAnalysisPass() = default;
-
- virtual void visit(const InputLayer &) = 0;
- virtual void visit(const ConvolutionLayer &) = 0;
-};
-
-#endif // __LAYER_ANALYSIS_PASS_H__
+++ /dev/null
-#ifndef __LAYER_CONTEXT_H__
-#define __LAYER_CONTEXT_H__
-
-#include "Layer.h"
-
-#include <vector>
-#include <memory>
-#include <cstdint>
-
-class LayerContext
-{
-public:
- uint32_t size(void) const;
-
-public:
- Layer &at(uint32_t n);
- const Layer &at(uint32_t n) const;
-
-public:
- LayerContext &append(std::unique_ptr<Layer> &&l);
-
-private:
- std::vector<std::unique_ptr<Layer>> _layers;
-};
-
-#endif // __LAYER_CONTEXT_H__
+++ /dev/null
-#ifndef __LAYER_FACTORY_H__
-#define __LAYER_FACTORY_H__
-
-#include "Network.h"
-
-#include <caffe/proto/caffe.pb.h>
-
-#include <memory>
-
-struct LayerFactory
-{
- virtual ~LayerFactory() = default;
-
- virtual std::unique_ptr<Layer> make(Network *, caffe::LayerParameter *) const = 0;
-};
-
-#endif // __LAYER_FACTORY_H__
+++ /dev/null
-#ifndef __LAYER_RESOLVER_H__
-#define __LAYER_RESOLVER_H__
-
-#include "LayerFactory.h"
-
-#include <map>
-#include <memory>
-#include <string>
-
-class LayerResolver
-{
-public:
- LayerResolver();
-
-public:
- const LayerFactory &resolve(const std::string &type) const;
-
-private:
- // NOTE Here std::shared_ptr<LayerFactory> is used instead of std::unique_ptr<LayerFactory>
- // as GCC 4.8.3 complains about the use of copy constructor even when _factories field
- // is never used.
- std::map<std::string, std::shared_ptr<LayerFactory>> _factories;
-};
-
-#endif // __LAYER_RESOLVER_H__
+++ /dev/null
-#ifndef __LAYER_TRANSFORM_PASS_H__
-#define __LAYER_TRANSFORM_PASS_H__
-
-#include "InputLayer.h"
-#include "ConvolutionLayer.h"
-
-struct LayerTransformPass
-{
- virtual ~LayerTransformPass() = default;
-
- virtual void visit(InputLayer &) = 0;
- virtual void visit(ConvolutionLayer &) = 0;
-};
-
-#endif // __LAYER_TRANSFORM_PASS_H__
+++ /dev/null
-#ifndef __NETWORK_H__
-#define __NETWORK_H__
-
-#include "BlobContext.h"
-#include "LayerContext.h"
-
-#include <caffe/proto/caffe.pb.h>
-
-#include <memory>
-
-class Network
-{
-public:
- explicit Network(std::unique_ptr<::caffe::NetParameter> &&param);
-
-public:
- LayerContext &layers(void) { return _layers; }
- const LayerContext &layers(void) const { return _layers; }
-
-public:
- BlobContext &blobs(void) { return _blobs; }
- const BlobContext &blobs(void) const { return _blobs; }
-
-public:
- ::caffe::NetParameter &param(void) { return *_param; }
- const ::caffe::NetParameter &param(void) const { return *_param; }
-
-private:
- const std::unique_ptr<::caffe::NetParameter> _param;
-
-private:
- BlobContext _blobs;
- LayerContext _layers;
-};
-
-#endif // __NETWORK_H__
+++ /dev/null
-#ifndef __NETWORK_BUILDER_H__
-#define __NETWORK_BUILDER_H__
-
-#include "LayerResolver.h"
-
-#include <caffe/proto/caffe.pb.h>
-
-#include <memory>
-
-class NetworkBuilder
-{
-public:
- NetworkBuilder(const LayerResolver &resolver);
-
-public:
- std::unique_ptr<Network> build(std::unique_ptr<caffe::NetParameter> &&);
-
-private:
- const LayerResolver &_resolver;
-};
-
-#endif // __NETWORK_BUILDER_H__
#include "InitCommand.h"
-#include "FillCommand.h"
#include "EncodeCommand.h"
#include "DecodeCommand.h"
#include "MergeCommand.h"
// all receive data from stdin
app.insert("init", make_unique<InitCommand>());
- app.insert("fill", make_unique<FillCommand>());
app.insert("encode", make_unique<EncodeCommand>());
app.insert("decode", make_unique<DecodeCommand>());
// takes 2 args: prototxt model and caffemodel weights in that order
+++ /dev/null
-#include "FillCommand.h"
-#include "internal/LayerResolver.h"
-#include "internal/NetworkBuilder.h"
-#include "internal/ParameterRandomizePass.h"
-
-#include <caffe/proto/caffe.pb.h>
-
-#include <stdex/Memory.h>
-
-#include <google/protobuf/io/coded_stream.h>
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-#include <google/protobuf/text_format.h>
-
-#include <chrono>
-#include <random>
-#include <iostream>
-
-using stdex::make_unique;
-
-int FillCommand::run(int, const char *const *) const
-{
- auto param = make_unique<::caffe::NetParameter>();
-
- // Read from standard input
- google::protobuf::io::FileInputStream is{0};
- if (!google::protobuf::TextFormat::Parse(&is, param.get()))
- {
- std::cerr << "ERROR: Failed to parse prototxt" << std::endl;
- return 255;
- }
-
- auto net = NetworkBuilder{LayerResolver{}}.build(std::move(param));
-
- uint32_t seed = std::chrono::system_clock::now().time_since_epoch().count();
-
- // Allow users to override seed
- {
- char *env = std::getenv("SEED");
-
- if (env)
- {
- seed = std::stoi(env);
- }
- }
-
- std::cerr << "Use '" << seed << "' as seed" << std::endl;
-
- // Create a random number generator
- std::default_random_engine generator{seed};
-
- // Randomize parameters
- for (uint32_t n = 0; n < net->layers().size(); ++n)
- {
- net->layers().at(n).accept(ParameterRandomizePass{generator});
- }
-
- // Write to standard output
- google::protobuf::io::FileOutputStream output(1);
- google::protobuf::TextFormat::Print(net->param(), &output);
-
- return 0;
-}
+++ /dev/null
-#ifndef __FILL_COMMAND_H__
-#define __FILL_COMMAND_H__
-
-#include <cli/Command.h>
-
-struct FillCommand final : public cli::Command
-{
- int run(int argc, const char *const *argv) const override;
-};
-
-#endif // __FILL_COMMAND_H__
+++ /dev/null
-#include "internal/BlobContext.h"
-
-const BlobShape &BlobContext::at(const std::string &name) const { return _shapes.at(name); }
-
-BlobContext &BlobContext::insert(const std::string &name, const BlobShape &shape)
-{
- _shapes[name] = shape;
- return (*this);
-}
+++ /dev/null
-#include "internal/ConvolutionLayer.h"
-#include "internal/LayerAnalysisPass.h"
-#include "internal/LayerTransformPass.h"
-
-#include <cassert>
-
-ConvolutionLayer::ConvolutionLayer(const Network *net, caffe::LayerParameter *p)
- : _net{net}, _param{p}
-{
- assert(param().type() == "Convolution");
- assert(param().mutable_convolution_param() != nullptr);
-
- // These constratins come from Convolution layer's definition
- assert(param().bottom_size() == 1);
- assert(param().top_size() == 1);
- assert(num_batch_axes() + 1 /*channel axis*/ + num_spatial_axes() == input_shape().rank());
-
- // TODO Support force_nd_im2col option
- assert(!conv_param().force_nd_im2col());
- // TODO Support negative axis
- assert(conv_param().axis() > 0);
- // TODO Support multi-group convolution
- assert(conv_param().group() == 1);
-
- // Comment on ConvolutionParameter (in caffe.proto)
- // Pad, kernel size, and stride are all given as a single value for equal
- // dimensions in all spatial dimensions, or once per spatial dimension.
- //
- // NOTE 'equal dimensions in all spatial dimensions' schema is supported
- // TODO Support 'once per spatial dimension'
- assert(conv_param().pad_size() == 1);
- assert(conv_param().kernel_size_size() == 1);
- assert(conv_param().stride_size() == 1);
-
- // NOTE 'dilation' is not supported yet
- // TODO Support 'dilation'
- assert(conv_param().dilation_size() == 0);
-}
-
-uint32_t ConvolutionLayer::bottom_size(void) const { return 1; }
-
-const std::string &ConvolutionLayer::bottom_name(uint32_t n) const
-{
- assert(n == 0);
- return input_name();
-}
-
-const BlobShape &ConvolutionLayer::bottom_shape(uint32_t n) const
-{
- assert(n == 0);
- return input_shape();
-}
-
-uint32_t ConvolutionLayer::top_size(void) const { return 1; }
-
-const std::string &ConvolutionLayer::top_name(uint32_t n) const
-{
- assert(n == 0);
- return output_name();
-}
-
-BlobShape ConvolutionLayer::top_shape(uint32_t n) const
-{
- assert(n == 0);
- return output_shape();
-}
-
-void ConvolutionLayer::accept(LayerAnalysisPass &&v) const { v.visit(*this); }
-void ConvolutionLayer::accept(LayerTransformPass &&v) { v.visit(*this); }
-
-const std::string &ConvolutionLayer::input_name(void) const { return param().bottom(0); }
-const BlobShape &ConvolutionLayer::input_shape(void) const
-{
- return _net->blobs().at(input_name());
-}
-
-const std::string &ConvolutionLayer::output_name(void) const { return param().top(0); }
-BlobShape ConvolutionLayer::output_shape(void) const
-{
- // The code below is derived from Caffe
- // - Please refer to 'compute_output_shape' method in 'caffe::ConvolutionLayer' for details
- BlobShape res{};
-
- res.resize(num_batch_axes() + 1 + num_spatial_axes());
-
- for (uint32_t batch_axis = 0; batch_axis < num_batch_axes(); ++batch_axis)
- {
- res.dim(batch_axis) = input_shape().dim(batch_axis);
- }
-
- res.dim(num_batch_axes()) = num_effective_output();
-
- for (uint32_t spatial_axis = 0; spatial_axis < num_spatial_axes(); ++spatial_axis)
- {
- const uint32_t axis = num_batch_axes() + 1 + spatial_axis;
- const int64_t kernel_ext = dilation(spatial_axis) * (kernel_size(spatial_axis) - 1) + 1;
-
- res.dim(axis) =
- (input_shape().dim(axis) + 2 * pad(spatial_axis) - kernel_ext) / stride(spatial_axis);
- }
-
- return res;
-}
-
-uint32_t ConvolutionLayer::channel_axis(void) const { return conv_param().axis(); }
-
-uint32_t ConvolutionLayer::num_effective_output(void) const { return conv_param().num_output(); }
-
-uint32_t ConvolutionLayer::num_spatial_axes(void) const
-{
- assert(input_shape().rank() > channel_axis());
- return input_shape().rank() - channel_axis() - 1;
-}
-
-uint32_t ConvolutionLayer::num_batch_axes(void) const
-{
- return input_shape().rank() - num_spatial_axes() - 1;
-}
-
-uint32_t ConvolutionLayer::pad(uint32_t /*spatial_axis*/) const { return conv_param().pad(0); }
-
-uint32_t ConvolutionLayer::kernel_size(uint32_t /*spatial_axis*/) const
-{
- return conv_param().kernel_size(0);
-}
-
-uint32_t ConvolutionLayer::stride(uint32_t /*spatial_axis*/) const
-{
- return conv_param().stride(0);
-}
-
-uint32_t ConvolutionLayer::dilation(uint32_t /*spatial_axis*/) const { return 1; }
+++ /dev/null
-#include "internal/InputLayer.h"
-#include "internal/LayerAnalysisPass.h"
-#include "internal/LayerTransformPass.h"
-
-#include <cassert>
-
-InputLayer::InputLayer(const Network *, caffe::LayerParameter *p) : _param{p}
-{
- assert(_param != nullptr);
- assert(param().type() == "Input");
- assert(param().bottom_size() == 0);
- assert(param().top_size() == param().input_param().shape_size());
-}
-
-uint32_t InputLayer::bottom_size(void) const { return 0; }
-
-const std::string &InputLayer::bottom_name(uint32_t) const { throw std::invalid_argument{"n"}; }
-
-const BlobShape &InputLayer::bottom_shape(uint32_t) const { throw std::invalid_argument{"n"}; }
-
-uint32_t InputLayer::top_size(void) const { return param().top_size(); }
-
-const std::string &InputLayer::top_name(uint32_t n) const { return param().top(n); }
-
-BlobShape InputLayer::top_shape(uint32_t n) const
-{
- BlobShape shape;
-
- const auto &shape_param = param().input_param().shape(n);
- const auto num_axes = shape_param.dim_size();
-
- shape.resize(num_axes);
-
- for (int axe = 0; axe < num_axes; ++axe)
- {
- shape.dim(axe) = shape_param.dim(axe);
- }
-
- return shape;
-}
-
-void InputLayer::accept(LayerAnalysisPass &&v) const { v.visit(*this); }
-void InputLayer::accept(LayerTransformPass &&v) { v.visit(*this); }
+++ /dev/null
-#include "internal/LayerAnalysisPass.h"
-
-// NOTE This file is introduced to ensure that 'LayerAnalyissPass.h' is self-complete.
+++ /dev/null
-#include "internal/LayerContext.h"
-
-uint32_t LayerContext::size(void) const { return _layers.size(); };
-
-Layer &LayerContext::at(uint32_t n) { return *(_layers.at(n)); }
-const Layer &LayerContext::at(uint32_t n) const { return *(_layers.at(n)); }
-
-LayerContext &LayerContext::append(std::unique_ptr<Layer> &&l)
-{
- _layers.emplace_back(std::move(l));
- return (*this);
-}
+++ /dev/null
-#include "internal/LayerResolver.h"
-#include "internal/InputLayer.h"
-#include "internal/ConvolutionLayer.h"
-
-#include <stdex/Memory.h>
-
-using stdex::make_unique;
-
-template <typename T> std::shared_ptr<LayerFactory> make_factory(void)
-{
- struct LayerFactoryImpl final : public LayerFactory
- {
- std::unique_ptr<Layer> make(Network *net, caffe::LayerParameter *p) const override
- {
- return make_unique<T>(net, p);
- }
- };
-
- return make_unique<LayerFactoryImpl>();
-}
-
-LayerResolver::LayerResolver()
-{
- _factories["Input"] = make_factory<InputLayer>();
- _factories["Convolution"] = make_factory<ConvolutionLayer>();
-}
-
-const LayerFactory &LayerResolver::resolve(const std::string &type) const
-{
- return *(_factories.at(type));
-}
+++ /dev/null
-#include "internal/Network.h"
-
-Network::Network(std::unique_ptr<::caffe::NetParameter> &&param) : _param{std::move(param)}
-{
- // DO NOTHING
-}
+++ /dev/null
-#include "internal/NetworkBuilder.h"
-
-#include <stdex/Memory.h>
-
-using stdex::make_unique;
-
-NetworkBuilder::NetworkBuilder(const LayerResolver &resolver) : _resolver{resolver}
-{
- // DO NOTHING
-}
-
-std::unique_ptr<Network> NetworkBuilder::build(std::unique_ptr<caffe::NetParameter> &&p)
-{
- auto res = make_unique<Network>(std::move(p));
-
- for (int n = 0; n < res->param().layer_size(); ++n)
- {
- caffe::LayerParameter *layer_param = res->param().mutable_layer(n);
- const std::string &layer_type = layer_param->type();
-
- auto l = _resolver.resolve(layer_type).make(res.get(), layer_param);
-
- for (uint32_t n = 0; n < l->top_size(); ++n)
- {
- res->blobs().insert(l->top_name(n), l->top_shape(n));
- }
-
- res->layers().append(std::move(l));
- }
-
- return res;
-}
+++ /dev/null
-#include "ParameterRandomizePass.h"
-
-#include <stdexcept>
-
-// NOTE GCC 4.8.3 emits the following error with brace-initialization on r-value reference
-// error: invalid initialization of non-const reference of type '..' from an rvalue of type '..'
-ParameterRandomizePass::ParameterRandomizePass(std::default_random_engine &generator)
- : _generator(generator)
-{
- // DO NOTHING
-}
-
-void ParameterRandomizePass::visit(InputLayer &)
-{
- // InputLayer has no parameter to be randomized
- return;
-}
-
-void ParameterRandomizePass::visit(ConvolutionLayer &l)
-{
- assert(l.param().blobs_size() == 0);
-
- caffe::ConvolutionParameter *conv_param = l.param().mutable_convolution_param();
-
- auto element_count = [](caffe::BlobShape &shape) {
- assert(shape.dim_size() > 0);
-
- int64_t count = 1;
-
- for (int axis = 0; axis < shape.dim_size(); ++axis)
- {
- count *= shape.dim(axis);
- }
-
- return count;
- };
-
- //
- // Fill Kernel
- //
- caffe::BlobProto *weight_blob = l.param().add_blobs();
- caffe::BlobShape *weight_shape = weight_blob->mutable_shape();
-
- weight_shape->add_dim(l.num_effective_output());
- weight_shape->add_dim(l.input_shape().dim(l.channel_axis()));
-
- for (uint32_t spatial_axis = 0; spatial_axis < l.num_spatial_axes(); ++spatial_axis)
- {
- const auto kernel_dim = l.kernel_size(spatial_axis);
- weight_shape->add_dim(kernel_dim);
- }
-
- // TODO Allow users to set mean and stddev
- std::normal_distribution<float> weight_distribution(0.0f, 2.0f);
-
- for (int64_t n = 0; n < element_count(*weight_shape); ++n)
- {
- weight_blob->add_data(weight_distribution(_generator));
- }
-
- //
- // Fill Bias
- //
- assert(conv_param->bias_term());
- caffe::BlobProto *bias_blob = l.param().add_blobs();
- caffe::BlobShape *bias_shape = bias_blob->mutable_shape();
-
- bias_shape->add_dim(l.num_effective_output());
-
- // TODO Allow users to set mean and stddev
- std::normal_distribution<float> bias_distribution(0.0f, 16.0f);
-
- for (int64_t n = 0; n < element_count(*bias_shape); ++n)
- {
- bias_blob->add_data(bias_distribution(_generator));
- }
-}
+++ /dev/null
-#ifndef __PARAMETER_RANDOMIZE_PASS_H__
-#define __PARAMETER_RANDOMIZE_PASS_H__
-
-#include "internal/LayerTransformPass.h"
-
-#include <random>
-
-class ParameterRandomizePass : public LayerTransformPass
-{
-public:
- ParameterRandomizePass(std::default_random_engine &generator);
-
-public:
- void visit(InputLayer &) override;
- void visit(ConvolutionLayer &) override;
-
-private:
- std::default_random_engine &_generator;
-};
-
-#endif // __PARAMETER_RANDOMIZE_PASS_H__