From f6992d6c36dc2c5f3d145f1e290bbf05d0d233aa Mon Sep 17 00:00:00 2001
From: Yangqing Jia
Date: Mon, 23 Sep 2013 16:00:47 -0700
Subject: [PATCH] pylint and code cleaning

---
 src/caffe/blob.cpp                | 17 ++++++----
 src/caffe/blob.hpp                |  4 +--
 src/caffe/common.hpp              |  4 +--
 src/caffe/filler.hpp              | 68 ++++++++++++---------------------------
 src/caffe/layer.hpp               | 18 ++++++-----
 src/caffe/layer_factory.hpp       |  2 ++
 src/caffe/syncedmem.cpp           |  6 ++--
 src/caffe/syncedmem.hpp           |  4 +--
 src/caffe/util/math_functions.cpp | 30 +++++++++++++++++
 src/caffe/util/math_functions.hpp |  7 ++++
 10 files changed, 89 insertions(+), 71 deletions(-)

diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp
index 4bba3f1..d0e47da 100644
--- a/src/caffe/blob.cpp
+++ b/src/caffe/blob.cpp
@@ -35,13 +35,16 @@ Blob<Dtype>::Blob(const Blob<Dtype>& source) {
   if (source.count() == 0) {
     Blob();
   } else {
-    Reshape(source.num(), source.channels(), source.height(), source.width());
+    Reshape(source.num(), source.channels(), source.height(),
+        source.width());
     // create the synced memories.
     data_.reset(new SyncedMemory(count_ * sizeof(Dtype)));
     diff_.reset(new SyncedMemory(count_ * sizeof(Dtype)));
     // Copy the data.
-    memcpy(data_->mutable_cpu_data(), source.cpu_data(), count_ * sizeof(Dtype));
-    memcpy(diff_->mutable_cpu_data(), source.cpu_diff(), count_ * sizeof(Dtype));
+    memcpy(data_->mutable_cpu_data(), source.cpu_data(),
+        count_ * sizeof(Dtype));
+    memcpy(diff_->mutable_cpu_data(), source.cpu_diff(),
+        count_ * sizeof(Dtype));
   }
 }
 
@@ -72,25 +75,25 @@ const Dtype* Blob<Dtype>::gpu_diff() const {
 template <typename Dtype>
 Dtype* Blob<Dtype>::mutable_cpu_data() {
   CHECK(data_);
-  return (Dtype*)data_->mutable_cpu_data();
+  return reinterpret_cast<Dtype*>(data_->mutable_cpu_data());
 }
 
 template <typename Dtype>
 Dtype* Blob<Dtype>::mutable_gpu_data() {
   CHECK(data_);
-  return (Dtype*)data_->mutable_gpu_data();
+  return reinterpret_cast<Dtype*>(data_->mutable_gpu_data());
 }
 
 template <typename Dtype>
 Dtype* Blob<Dtype>::mutable_cpu_diff() {
   CHECK(diff_);
-  return (Dtype*)diff_->mutable_cpu_data();
+  return reinterpret_cast<Dtype*>(diff_->mutable_cpu_data());
 }
 
 template <typename Dtype>
 Dtype* Blob<Dtype>::mutable_gpu_diff() {
   CHECK(diff_);
-  return (Dtype*)diff_->mutable_gpu_data();
+  return reinterpret_cast<Dtype*>(diff_->mutable_gpu_data());
 }
 
 template <typename Dtype>
diff --git a/src/caffe/blob.hpp b/src/caffe/blob.hpp
index 0b136d8..35e6d2c 100644
--- a/src/caffe/blob.hpp
+++ b/src/caffe/blob.hpp
@@ -14,11 +14,11 @@ class Blob {
  public:
   Blob()
       : num_(0), channels_(0), height_(0), width_(0), count_(0), data_(),
-      diff_() {};
+      diff_() {}
   explicit Blob(const int num, const int channels, const int height,
     const int width);
   Blob(const Blob<Dtype>& source);
-  virtual ~Blob() {};
+  virtual ~Blob() {}
   void Reshape(const int num, const int height, const int width,
     const int channels);
   inline int num() const { return num_; }
diff --git a/src/caffe/common.hpp b/src/caffe/common.hpp
index e3633ba..c9b9f65 100644
--- a/src/caffe/common.hpp
+++ b/src/caffe/common.hpp
@@ -7,11 +7,11 @@
 #include <cublas_v2.h>
 #include <cuda.h>
 #include <curand.h>
+//cuda driver types
+#include <driver_types.h>
 #include <glog/logging.h>
 #include <mkl_vsl.h>
 
-#include "driver_types.h"
-
 #define CUDA_CHECK(condition) CHECK_EQ((condition), cudaSuccess)
 #define CUBLAS_CHECK(condition) CHECK_EQ((condition), CUBLAS_STATUS_SUCCESS)
 #define CURAND_CHECK(condition) CHECK_EQ((condition), CURAND_STATUS_SUCCESS)
diff --git a/src/caffe/filler.hpp b/src/caffe/filler.hpp
index 388f284..024f8d7 100644
--- a/src/caffe/filler.hpp
+++ b/src/caffe/filler.hpp
@@ -8,10 +8,12 @@
 #define CAFFE_FILLER_HPP
 
 #include <mkl.h>
+#include <string>
 
 #include "caffe/common.hpp"
 #include "caffe/blob.hpp"
 #include "caffe/syncedmem.hpp"
+#include "caffe/util/math_functions.hpp"
#include "caffe/proto/layer_param.pb.h" namespace caffe { @@ -19,22 +21,19 @@ namespace caffe { template class Filler { public: - Filler(const FillerParameter& param) : filler_param_(param) {}; - virtual ~Filler() {}; + explicit Filler(const FillerParameter& param) : filler_param_(param) {} + virtual ~Filler() {} virtual void Fill(Blob* blob) = 0; protected: FillerParameter filler_param_; }; // class Filler -template -class FillerFactory { - -}; template class ConstantFiller : public Filler { public: - ConstantFiller(const FillerParameter& param) : Filler(param) {}; + explicit ConstantFiller(const FillerParameter& param) + : Filler(param) {} virtual void Fill(Blob* blob) { Dtype* data = blob->mutable_cpu_data(); const int count = blob->count(); @@ -49,53 +48,28 @@ class ConstantFiller : public Filler { template class UniformFiller : public Filler { public: - UniformFiller(const FillerParameter& param) : Filler(param) {}; + explicit UniformFiller(const FillerParameter& param) + : Filler(param) {} virtual void Fill(Blob* blob) { - void* data = (void*)(blob->mutable_cpu_data()); - const int count = blob->count(); - const Dtype value = this->filler_param_.value(); - CHECK(count); - switch(sizeof(Dtype)) { - case sizeof(float): - VSL_CHECK(vsRngUniform(VSL_RNG_METHOD_UNIFORM_STD, Caffe::vsl_stream(), - count, (float*)data, this->filler_param_.min(), - this->filler_param_.max())); - break; - case sizeof(double): - VSL_CHECK(vdRngUniform(VSL_RNG_METHOD_UNIFORM_STD, Caffe::vsl_stream(), - count, (double*)data, this->filler_param_.min(), - this->filler_param_.max())); - break; - default: - CHECK(false) << "Unknown dtype."; - } - }; + DCHECK(blob->count()); + caffe_vRngUniform(blob->count(), blob->mutable_cpu_data(), + Dtype(this->filler_param_.min()), + Dtype(this->filler_param_.max())); + } }; template class GaussianFiller : public Filler { public: - GaussianFiller(const FillerParameter& param) : Filler(param) {}; + explicit GaussianFiller(const FillerParameter& param) + : Filler(param) {} virtual void Fill(Blob* blob) { - void* data = (void*)(blob->mutable_cpu_data()); - const int count = blob->count(); - const Dtype value = this->filler_param_.value(); - CHECK(count); - switch(sizeof(Dtype)) { - case sizeof(float): - VSL_CHECK(vsRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, - Caffe::vsl_stream(), count, (float*)data, - this->filler_param_.mean(), this->filler_param_.std())); - break; - case sizeof(double): - VSL_CHECK(vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, - Caffe::vsl_stream(), count, (double*)data, - this->filler_param_.mean(), this->filler_param_.std())); - break; - default: - CHECK(false) << "Unknown dtype."; - } - }; + Dtype* data = blob->mutable_cpu_data(); + DCHECK(blob->count()); + caffe_vRngGaussian(blob->count(), blob->mutable_cpu_data(), + Dtype(this->filler_param_.mean()), + Dtype(this->filler_param_.std())); + } }; // A function to get a specific filler from the specification given in diff --git a/src/caffe/layer.hpp b/src/caffe/layer.hpp index 957b78c..130d3fb 100644 --- a/src/caffe/layer.hpp +++ b/src/caffe/layer.hpp @@ -15,12 +15,12 @@ namespace caffe { template class Layer { public: - // You should not implement your own constructor. Any set up code should go - // to SetUp(), where the dimensions of the bottom blobs are provided to the - // layer. + // You should not implement your own constructor. Any set up code should go + // to SetUp(), where the dimensions of the bottom blobs are provided to the + // layer. 
   explicit Layer(const LayerParameter& param)
-    : layer_param_(param) {};
-  virtual ~Layer() {};
+    : layer_param_(param) {}
+  virtual ~Layer() {}
   // SetUp: your function should implement this.
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) = 0;
@@ -35,7 +35,9 @@
       vector<Blob<Dtype>*>* bottom);
 
   // Returns the vector of parameters.
-  vector<shared_ptr<Blob<Dtype> > >& params() { return blobs_; };
+  vector<shared_ptr<Blob<Dtype> > >& params() {
+    return blobs_;
+  }
 
  protected:
   // The protobuf that stores the layer parameters
@@ -73,7 +75,7 @@
 template <typename Dtype>
 inline void Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
-  switch(Caffe::mode()) {
+  switch (Caffe::mode()) {
   case Caffe::CPU:
     Forward_cpu(bottom, top);
     break;
@@ -89,7 +91,7 @@ template <typename Dtype>
 inline Dtype Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
     const bool propagate_down,
     vector<Blob<Dtype>*>* bottom) {
-  switch(Caffe::mode()) {
+  switch (Caffe::mode()) {
   case Caffe::CPU:
     return Backward_cpu(top, propagate_down, bottom);
   case Caffe::GPU:
diff --git a/src/caffe/layer_factory.hpp b/src/caffe/layer_factory.hpp
index 06c9df5..4909d27 100644
--- a/src/caffe/layer_factory.hpp
+++ b/src/caffe/layer_factory.hpp
@@ -3,6 +3,8 @@
 #ifndef CAFFE_LAYER_FACTORY_HPP_
 #define CAFFE_LAYER_FACTORY_HPP_
 
+#include <string>
+
 #include "caffe/layer.hpp"
 #include "caffe/vision_layers.hpp"
 #include "caffe/proto/layer_param.pb.h"
diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp
index cffb297..8e6996d 100644
--- a/src/caffe/syncedmem.cpp
+++ b/src/caffe/syncedmem.cpp
@@ -12,14 +12,14 @@ SyncedMemory::~SyncedMemory() {
   if (cpu_ptr_) {
     CUDA_CHECK(cudaFreeHost(cpu_ptr_));
   }
-  
+
   if (gpu_ptr_) {
     CUDA_CHECK(cudaFree(gpu_ptr_));
   }
 }
 
 inline void SyncedMemory::to_cpu() {
-  switch(head_) {
+  switch (head_) {
   case UNINITIALIZED:
     CUDA_CHECK(cudaMallocHost(&cpu_ptr_, size_));
     memset(cpu_ptr_, 0, size_);
@@ -39,7 +39,7 @@ inline void SyncedMemory::to_cpu() {
 }
 
 inline void SyncedMemory::to_gpu() {
-  switch(head_) {
+  switch (head_) {
   case UNINITIALIZED:
     CUDA_CHECK(cudaMalloc(&gpu_ptr_, size_));
     CUDA_CHECK(cudaMemset(gpu_ptr_, 0, size_));
diff --git a/src/caffe/syncedmem.hpp b/src/caffe/syncedmem.hpp
index 4c56afd..9cf3b87 100644
--- a/src/caffe/syncedmem.hpp
+++ b/src/caffe/syncedmem.hpp
@@ -8,9 +8,9 @@ namespace caffe {
 class SyncedMemory {
  public:
   SyncedMemory()
-      : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED) {};
+      : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED) {}
   explicit SyncedMemory(size_t size)
-      : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED) {};
+      : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED) {}
   ~SyncedMemory();
   const void* cpu_data();
   const void* gpu_data();
diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp
index 0c33c65..5a44468 100644
--- a/src/caffe/util/math_functions.cpp
+++ b/src/caffe/util/math_functions.cpp
@@ -157,4 +157,34 @@ template <>
 void caffe_powx<double>(const int n, const double* a, const double b,
     double* y) {
   vdPowx(n, a, b, y);
 }
+template <>
+void caffe_vRngUniform<float>(const int n, float* r,
+    const float a, const float b) {
+  VSL_CHECK(vsRngUniform(VSL_RNG_METHOD_UNIFORM_STD, Caffe::vsl_stream(),
+      n, r, a, b));
+}
+
+template <>
+void caffe_vRngUniform<double>(const int n, double* r,
+    const double a, const double b) {
+  VSL_CHECK(vdRngUniform(VSL_RNG_METHOD_UNIFORM_STD, Caffe::vsl_stream(),
+      n, r, a, b));
+}
+
+template <>
+void caffe_vRngGaussian<float>(const int n, float* r, const float a,
+    const float sigma){
+  VSL_CHECK(vsRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER,
+      Caffe::vsl_stream(), n, r, a, sigma));
+}
+
+
+template <>
+void caffe_vRngGaussian<double>(const int n, double* r, const double a,
+    const double sigma){
+  VSL_CHECK(vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER,
+      Caffe::vsl_stream(), n, r, a, sigma));
+}
+
+
 }  // namespace caffe
diff --git a/src/caffe/util/math_functions.hpp b/src/caffe/util/math_functions.hpp
index 42304ba..0c03c59 100644
--- a/src/caffe/util/math_functions.hpp
+++ b/src/caffe/util/math_functions.hpp
@@ -57,6 +57,13 @@
 void caffe_div(const int N, const Dtype* a, const Dtype* b, Dtype* y);
 
 template <typename Dtype>
 void caffe_powx(const int n, const Dtype* a, const Dtype b, Dtype* y);
 
+template <typename Dtype>
+void caffe_vRngUniform(const int n, Dtype* r, const Dtype a, const Dtype b);
+
+template <typename Dtype>
+void caffe_vRngGaussian(const int n, Dtype* r, const Dtype a,
+    const Dtype sigma);
+
 }  // namespace caffe
-- 
2.7.4
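
Note (not part of the patch): the functional change above is that filler.hpp no
longer switches on sizeof(Dtype) to pick the MKL VSL vsRng*/vdRng* call; it
delegates to the new caffe_vRngUniform / caffe_vRngGaussian templates, which
are declared generically in math_functions.hpp and explicitly specialized for
float and double in math_functions.cpp. The standalone sketch below only
illustrates that declare-then-specialize dispatch pattern; std::mt19937 and
the names demo::vRngUniform / rng_stream are stand-ins invented for the
example and do not appear in Caffe.

// Minimal sketch of per-type specialization replacing a sizeof(Dtype) switch.
#include <iostream>
#include <random>
#include <vector>

namespace demo {

// Stand-in for Caffe::vsl_stream(): a single shared generator.
std::mt19937& rng_stream() {
  static std::mt19937 gen(1701);
  return gen;
}

// Generic declaration, mirroring the caffe_vRngUniform declaration; only the
// float and double specializations are defined.
template <typename Dtype>
void vRngUniform(const int n, Dtype* r, const Dtype a, const Dtype b);

template <>
void vRngUniform<float>(const int n, float* r, const float a, const float b) {
  std::uniform_real_distribution<float> dist(a, b);
  for (int i = 0; i < n; ++i) { r[i] = dist(rng_stream()); }
}

template <>
void vRngUniform<double>(const int n, double* r, const double a,
    const double b) {
  std::uniform_real_distribution<double> dist(a, b);
  for (int i = 0; i < n; ++i) { r[i] = dist(rng_stream()); }
}

}  // namespace demo

int main() {
  // A caller, like UniformFiller::Fill after the patch, no longer needs a
  // switch on sizeof(Dtype): the element type selects the specialization.
  std::vector<float> data(5);
  demo::vRngUniform<float>(static_cast<int>(data.size()), data.data(),
      0.0f, 1.0f);
  for (float v : data) { std::cout << v << " "; }
  std::cout << std::endl;
  return 0;
}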