data_.reset(new SyncedMemory(count_ * sizeof(Dtype)));
diff_.reset(new SyncedMemory(count_ * sizeof(Dtype)));
} else {
- data_.reset((SyncedMemory*)NULL);
- diff_.reset((SyncedMemory*)NULL);
+ data_.reset(reinterpret_cast<SyncedMemory*>(NULL));
+ diff_.reset(reinterpret_cast<SyncedMemory*>(NULL));
}
}
#include "caffe/proto/caffe.pb.h"
-#endif // CAFFE_CAFFE_HPP_
\ No newline at end of file
+#endif // CAFFE_CAFFE_HPP_
CURAND_CHECK(curandCreateGenerator(&Get().curand_generator_,
CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CHECK(curandSetPseudoRandomGeneratorSeed(curand_generator(),
- (unsigned long long)seed));
+ seed));
// VSL seed
VSL_CHECK(vslDeleteStream(&(Get().vsl_stream_)));
VSL_CHECK(vslNewStream(&(Get().vsl_stream_), VSL_BRNG_MT19937, seed));
}
#define DISABLE_COPY_AND_ASSIGN(classname) \
- private:\
+private:\
classname(const classname&);\
classname& operator=(const classname&)
int fan_in = blob->width();
Dtype scale = sqrt(Dtype(3) / fan_in);
caffe_vRngUniform<Dtype>(blob->count(), blob->mutable_cpu_data(),
- -scale, scale);
+ -scale, scale);
}
};
// Copyright 2013 Yangqing Jia
#include <stdint.h>
+#include <leveldb/db.h>
+
#include <string>
#include <vector>
-#include <leveldb/db.h>
-
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
// Copyright 2013 Yangqing Jia
-#include <vector>
#include <mkl.h>
#include <cublas_v2.h>
+#include <vector>
+
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
GetFiller<Dtype>(this->layer_param_.bias_filler()));
bias_filler->Fill(this->blobs_[1].get());
bias_multiplier_.reset(new SyncedMemory(M_ * sizeof(Dtype)));
- Dtype* bias_multiplier_data = (Dtype*)bias_multiplier_->mutable_cpu_data();
+ Dtype* bias_multiplier_data =
+ reinterpret_cast<Dtype*>(bias_multiplier_->mutable_cpu_data());
for (int i = 0; i < M_; ++i) {
bias_multiplier_data[i] = 1.;
}
// Copyright 2013 Yangqing Jia
#include <algorithm>
+#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
return Dtype(0);
}
-// TODO: implement the GPU version of softmax.
+// TODO(Yangqing): implement the GPU version of softmax.
INSTANTIATE_CLASS(SoftmaxLayer);
+// Copyright 2013 Yangqing Jia
+
#include <stdint.h>
-#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
+#include <algorithm>
+#include <string>
+
#include "caffe/common.hpp"
#include "caffe/util/io.hpp"
#include "caffe/proto/caffe.pb.h"
using cv::Mat;
using cv::Vec3b;
+using std::max;
using std::string;
namespace caffe {
void ReadImageToProto(const string& filename, BlobProto* proto) {
- Mat cv_image;
- cv_image = cv::imread(filename, CV_LOAD_IMAGE_COLOR);
- CHECK(cv_image.data) << "Could not open or find the image.";
- DCHECK_EQ(cv_image.channels(), 3);
+ Mat cv_img;
+ cv_img = cv::imread(filename, CV_LOAD_IMAGE_COLOR);
+ CHECK(cv_img.data) << "Could not open or find the image.";
+ DCHECK_EQ(cv_img.channels(), 3);
proto->set_num(1);
proto->set_channels(3);
- proto->set_height(cv_image.rows);
- proto->set_width(cv_image.cols);
+ proto->set_height(cv_img.rows);
+ proto->set_width(cv_img.cols);
proto->clear_data();
proto->clear_diff();
for (int c = 0; c < 3; ++c) {
- for (int h = 0; h < cv_image.rows; ++h) {
- for (int w = 0; w < cv_image.cols; ++w) {
- proto->add_data(float(cv_image.at<Vec3b>(h, w)[c]) / 255.);
+ for (int h = 0; h < cv_img.rows; ++h) {
+ for (int w = 0; w < cv_img.cols; ++w) {
+ proto->add_data(static_cast<float>(cv_img.at<Vec3b>(h, w)[c]) / 255.);
}
}
}
void WriteProtoToImage(const string& filename, const BlobProto& proto) {
CHECK_EQ(proto.num(), 1);
- CHECK_EQ(proto.channels(), 3);
+ // Accept either color (3-channel) or grayscale (1-channel) blobs.
+ CHECK(proto.channels() == 3 || proto.channels() == 1);
CHECK_GT(proto.height(), 0);
CHECK_GT(proto.width(), 0);
- Mat cv_image(proto.height(), proto.width(), CV_8UC3);
- // TODO: copy the blob data to image.
+ Mat cv_img(proto.height(), proto.width(), CV_8UC3);
for (int c = 0; c < 3; ++c) {
- for (int h = 0; h < cv_image.rows; ++h) {
- for (int w = 0; w < cv_image.cols; ++w) {
- cv_image.at<Vec3b>(h, w)[c] =
- uint8_t(proto.data((c * cv_image.rows + h) * cv_image.cols + w)
+ // Source channel: identity for 3-channel blobs; for 1-channel blobs
+ // replicate channel 0 into all three output channels. min() is
+ // required here — max(c, channels - 1) would read channel 2 for
+ // every c on color input and index out of bounds on grayscale input.
+ int source_c = std::min(c, proto.channels() - 1);
+ for (int h = 0; h < cv_img.rows; ++h) {
+ for (int w = 0; w < cv_img.cols; ++w) {
+ cv_img.at<Vec3b>(h, w)[c] =
+ static_cast<uint8_t>(
+ proto.data((source_c * cv_img.rows + h) * cv_img.cols + w)
* 255.);
}
}
}
- CHECK(cv::imwrite(filename, cv_image));
+ CHECK(cv::imwrite(filename, cv_img));
}
+// Copyright Yangqing Jia 2013
+
#ifndef CAFFE_UTIL_IO_H_
#define CAFFE_UTIL_IO_H_
} // namespace caffe
-#endif // CAFFE_UTIL_IO_H_
\ No newline at end of file
+#endif // CAFFE_UTIL_IO_H_
protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
- //virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- // vector<Blob<Dtype>*>* top);
+ // virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ // vector<Blob<Dtype>*>* top);
virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- //virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
- // const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ // virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
+ // const bool propagate_down, vector<Blob<Dtype>*>* bottom);
// sum_multiplier is just used to carry out sum using blas
Blob<Dtype> sum_multiplier_;
vector<Blob<Dtype>*>* top) { return; }
virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- //virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
- // const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ // virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
+ // const bool propagate_down, vector<Blob<Dtype>*>* bottom);
};
} // namespace caffe