const int width = transformed_blob->width();
CHECK_GT(datum_num, 0) << "There is no datum to add";
- CHECK_LE(datum_num, num) <<
- "The size of datum_vector must be smaller than transformed_blob->num()";
+ CHECK_EQ(datum_num, num) <<
+ "The size of datum_vector must be equal to transformed_blob->num()";
Blob<Dtype> uni_blob(1, channels, height, width);
for (int item_id = 0; item_id < datum_num; ++item_id) {
int offset = transformed_blob->offset(item_id);
}
}
+#ifndef OSX
template<typename Dtype>
void DataTransformer<Dtype>::Transform(const vector<cv::Mat> & mat_vector,
Blob<Dtype>* transformed_blob) {
const int width = transformed_blob->width();
CHECK_GT(mat_num, 0) << "There is no MAT to add";
- CHECK_LE(mat_num, num) <<
- "The size of mat_vector must be smaller than transformed_blob->num()";
+ CHECK_EQ(mat_num, num) <<
+ "The size of mat_vector must be equal to transformed_blob->num()";
Blob<Dtype> uni_blob(1, channels, height, width);
for (int item_id = 0; item_id < mat_num; ++item_id) {
int offset = transformed_blob->offset(item_id);
Transform(mat_vector[item_id], &uni_blob);
}
}
+#endif
template<typename Dtype>
void DataTransformer<Dtype>::Transform(const cv::Mat& cv_img,
added_label_.Reshape(batch_size_, 1, 1, 1);
data_ = NULL;
labels_ = NULL;
+ needs_reshape_ = false;
added_data_.cpu_data();
added_label_.cpu_data();
}
CHECK(!has_new_data_) <<
"Can't add Datum when earlier ones haven't been consumed"
<< " by the upper layers";
-
size_t num = datum_vector.size();
- if (batch_size_ != num) {
- needs_reshape_ = true;
- batch_size_ = num;
- added_data_.Reshape(batch_size_, channels_, height_, width_);
- added_label_.Reshape(batch_size_, 1, 1, 1);
- }
-
CHECK_GT(num, 0) << "There is no datum to add";
- CHECK_LE(num, batch_size_) <<
- "The number of added datum must be no greater than the batch size";
-
+ CHECK_EQ(num % batch_size_, 0) <<
+ "The number of added datum must be a multiple of the batch size";
+ // Always resize the staging blobs to exactly the added data; a previous,
+ // larger Add would otherwise leave them too big for the transformer's
+ // CHECK_EQ(datum_num, num) to pass. Reshape is a no-op when dims match.
+ added_data_.Reshape(num, channels_, height_, width_);
+ added_label_.Reshape(num, 1, 1, 1);
// Apply data transformations (mirror, scale, crop...)
this->data_transformer_.Transform(datum_vector, &added_data_);
// Copy Labels
}
// num_images == batch_size_
Dtype* top_data = added_data_.mutable_cpu_data();
- Reset(top_data, top_label, batch_size_);
+ Reset(top_data, top_label, num);
has_new_data_ = true;
}
template <typename Dtype>
void MemoryDataLayer<Dtype>::AddMatVector(const vector<cv::Mat>& mat_vector,
const vector<int>& labels) {
-
+ size_t num = mat_vector.size();
+ // Keep the label/mat size validation the old code had; dropping it would
+ // silently accept mismatched vectors and read out of bounds later.
+ CHECK_EQ(num, labels.size()) <<
+ "vector of labels and vector of mats need to be of the same size";
CHECK(!has_new_data_) <<
"Can't add Mat when earlier ones haven't been consumed"
<< " by the upper layers";
-
- CHECK_EQ(mat_vector.size(), labels.size()) <<
- "vector of labels and vector of mats need to be of the same size";
-
- size_t num = mat_vector.size();
- if (batch_size_ != num) {
- needs_reshape_ = true;
- batch_size_ = num;
- added_data_.Reshape(batch_size_, channels_, height_, width_);
- added_label_.Reshape(batch_size_, 1, 1, 1);
- }
-
+ CHECK_EQ(num % batch_size_, 0) <<
+ "The number of added mat must be a multiple of the batch size";
CHECK_GT(num, 0) << "There is no mat to add";
- CHECK_LE(num, batch_size_) <<
- "The number of added mat must be no greater than the batch size";
-
+ // Always resize the staging blobs to exactly the added data (this also
+ // shrinks them again after a previous, larger Add).
+ added_data_.Reshape(num, channels_, height_, width_);
+ added_label_.Reshape(num, 1, 1, 1);
// Apply data transformations (mirror, scale, crop...)
this->data_transformer_.Transform(mat_vector, &added_data_);
// Copy Labels
}
// num_images == batch_size_
Dtype* top_data = added_data_.mutable_cpu_data();
- Reset(top_data, top_label, batch_size_);
+ Reset(top_data, top_label, num);
has_new_data_ = true;
}
}
template <typename Dtype>
+void MemoryDataLayer<Dtype>::ChangeBatchSize(int new_size) {
+ // Changing the batch size while un-consumed data is queued would break
+ // Forward_cpu's pos_/n_ bookkeeping, hence the guard below.
+ CHECK_GT(new_size, 0) << "batch_size must be positive";
+ CHECK(!has_new_data_) <<
+ "Can't change batch_size until current data has been consumed"
+ << " by the upper layers";
+ batch_size_ = new_size;
+ added_data_.Reshape(batch_size_, channels_, height_, width_);
+ added_label_.Reshape(batch_size_, 1, 1, 1);
+ // Tops are reshaped lazily on the next Forward_cpu.
+ needs_reshape_ = true;
+}
+
+template <typename Dtype>
void MemoryDataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK(data_) << "MemoryDataLayer needs to be initalized by calling Reset";
if (needs_reshape_) {
top[0]->Reshape(batch_size_, channels_, height_, width_);
top[1]->Reshape(batch_size_, 1, 1, 1);
+ // Clear the flag inside the branch so tops are reshaped only once
+ // per batch-size change, not on every forward pass.
+ needs_reshape_ = false;
}
top[0]->set_cpu_data(data_ + pos_ * size_);
top[1]->set_cpu_data(labels_ + pos_);
pos_ = (pos_ + batch_size_) % n_;
- has_new_data_ = false;
- needs_reshape_ = false;
+ // Added data may span several batches; it counts as consumed only once
+ // pos_ wraps back to 0 after a full pass over all n_ items.
+ if (pos_ == 0)
+ has_new_data_ = false;
}
INSTANTIATE_CLASS(MemoryDataLayer);
memory_data_param->set_width(this->width_);
MemoryDataLayer<Dtype> layer(param);
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
-
- vector<Datum> datum_vector(this->batch_size_);
+ // We add batch_size*num_iter items, then for each iteration
+ // we forward batch_size elements
+ int num_iter = 5;
+ vector<Datum> datum_vector(this->batch_size_ * num_iter);
const size_t count = this->channels_ * this->height_ * this->width_;
size_t pixel_index = 0;
- for (int i = 0; i < this->batch_size_; ++i) {
+ for (int i = 0; i < this->batch_size_ * num_iter; ++i) {
datum_vector[i].set_channels(this->channels_);
datum_vector[i].set_height(this->height_);
datum_vector[i].set_width(this->width_);
}
datum_vector[i].set_data(&(pixels[0]), count);
}
-
layer.AddDatumVector(datum_vector);
int data_index;
// Go through the data 5 times
- for (int iter = 0; iter < 5; ++iter) {
+ for (int iter = 0; iter < num_iter; ++iter) {
+ int offset = this->batch_size_ * iter;
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* data = this->data_blob_->cpu_data();
size_t index = 0;
for (int i = 0; i < this->batch_size_; ++i) {
- const string& data_string = datum_vector[i].data();
- EXPECT_EQ(i, this->label_blob_->cpu_data()[i]);
+ const string& data_string = datum_vector[offset + i].data();
+ EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]);
for (int c = 0; c < this->channels_; ++c) {
for (int h = 0; h < this->height_; ++h) {
for (int w = 0; w < this->width_; ++w) {
TYPED_TEST(MemoryDataLayerTest, AddMatVectorDefaultTransform) {
typedef typename TypeParam::Dtype Dtype;
+ LayerParameter param;
+ MemoryDataParameter* memory_data_param = param.mutable_memory_data_param();
+ memory_data_param->set_batch_size(this->batch_size_);
+ memory_data_param->set_channels(this->channels_);
+ memory_data_param->set_height(this->height_);
+ memory_data_param->set_width(this->width_);
+ MemoryDataLayer<Dtype> layer(param);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ // We add batch_size*num_iter items, then for each iteration
+ // we forward batch_size elements
+ int num_iter = 5;
+ vector<cv::Mat> mat_vector(this->batch_size_ * num_iter);
+ vector<int> label_vector(this->batch_size_ * num_iter);
+ for (int i = 0; i < this->batch_size_*num_iter; ++i) {
+ mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4);
+ label_vector[i] = i;
+ cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255));
+ }
+ layer.AddMatVector(mat_vector, label_vector);
+ int data_index;
+ const size_t count = this->channels_ * this->height_ * this->width_;
+ for (int iter = 0; iter < num_iter; ++iter) {
+ int offset = this->batch_size_ * iter;
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+ const Dtype* data = this->data_blob_->cpu_data();
+ for (int i = 0; i < this->batch_size_; ++i) {
+ EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]);
+ for (int h = 0; h < this->height_; ++h) {
+ const unsigned char* ptr_mat = mat_vector[offset + i].ptr<uchar>(h);
+ int index = 0;
+ for (int w = 0; w < this->width_; ++w) {
+ for (int c = 0; c < this->channels_; ++c) {
+ data_index = (i*count) + (c * this->height_ + h) * this->width_ + w;
+ Dtype pixel = static_cast<Dtype>(ptr_mat[index++]);
+ EXPECT_EQ(static_cast<int>(pixel),
+ data[data_index]);
+ }
+ }
+ }
+ }
+ }
+}
+
+TYPED_TEST(MemoryDataLayerTest, TestChangeBatchSize) {
+ typedef typename TypeParam::Dtype Dtype;
LayerParameter param;
MemoryDataParameter* memory_data_param = param.mutable_memory_data_param();
memory_data_param->set_batch_size(this->batch_size_);
memory_data_param->set_width(this->width_);
MemoryDataLayer<Dtype> layer(param);
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
- vector<cv::Mat> mat_vector(this->batch_size_);
- vector<int> label_vector(this->batch_size_);
- for (int i = 0; i < this->batch_size_; ++i) {
+ // first add data as usual
+ int num_iter = 5;
+ vector<cv::Mat> mat_vector(this->batch_size_ * num_iter);
+ vector<int> label_vector(this->batch_size_ * num_iter);
+ for (int i = 0; i < this->batch_size_*num_iter; ++i) {
mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4);
label_vector[i] = i;
cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255));
}
layer.AddMatVector(mat_vector, label_vector);
+ // then consume the data
int data_index;
const size_t count = this->channels_ * this->height_ * this->width_;
- for (int iter = 0; iter < 5; ++iter) {
+ for (int iter = 0; iter < num_iter; ++iter) {
+ int offset = this->batch_size_ * iter;
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
const Dtype* data = this->data_blob_->cpu_data();
for (int i = 0; i < this->batch_size_; ++i) {
- EXPECT_EQ(i, this->label_blob_->cpu_data()[i]);
+ EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]);
+ for (int h = 0; h < this->height_; ++h) {
+ const unsigned char* ptr_mat = mat_vector[offset + i].ptr<uchar>(h);
+ int index = 0;
+ for (int w = 0; w < this->width_; ++w) {
+ for (int c = 0; c < this->channels_; ++c) {
+ data_index = (i*count) + (c * this->height_ + h) * this->width_ + w;
+ Dtype pixel = static_cast<Dtype>(ptr_mat[index++]);
+ EXPECT_EQ(static_cast<int>(pixel),
+ data[data_index]);
+ }
+ }
+ }
+ }
+ }
+ // and then add new data with different batch_size
+ int new_batch_size = 16;
+ layer.ChangeBatchSize(new_batch_size);
+ mat_vector.clear();
+ mat_vector.resize(new_batch_size * num_iter);
+ label_vector.clear();
+ label_vector.resize(new_batch_size * num_iter);
+ for (int i = 0; i < new_batch_size*num_iter; ++i) {
+ mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4);
+ label_vector[i] = i;
+ cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255));
+ }
+ layer.AddMatVector(mat_vector, label_vector);
+
+ // finally consume new data and check if everything is fine
+ for (int iter = 0; iter < num_iter; ++iter) {
+ int offset = new_batch_size * iter;
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+ EXPECT_EQ(new_batch_size, this->blob_top_vec_[0]->num());
+ EXPECT_EQ(new_batch_size, this->blob_top_vec_[1]->num());
+ const Dtype* data = this->data_blob_->cpu_data();
+ for (int i = 0; i < new_batch_size; ++i) {
+ EXPECT_EQ(offset + i, this->label_blob_->cpu_data()[i]);
for (int h = 0; h < this->height_; ++h) {
- const unsigned char* ptr_mat = mat_vector[i].ptr<uchar>(h);
+ const unsigned char* ptr_mat = mat_vector[offset + i].ptr<uchar>(h);
int index = 0;
for (int w = 0; w < this->width_; ++w) {
for (int c = 0; c < this->channels_; ++c) {