virtual inline int ExactNumTopBlobs() const { return 2; }
virtual void AddDatumVector(const vector<Datum>& datum_vector);
+ virtual void AddMatVector(const vector<cv::Mat>& mat_vector,
+ const vector<int>& labels);
// Reset should accept const pointers, but can't, because the memory
// will be given to Blob, which is mutable
/**
* @brief Applies the transformation defined in the data layer's
+ * transform_param block to a vector of cv::Mat.
+ *
+ * @param mat_vector
+ * A vector of cv::Mat containing the data to be transformed.
+ * @param transformed_blob
+ * This is the destination blob. It can be part of the top blob's data if
+ * set_cpu_data() is used. See memory_data_layer.cpp for an example.
+ */
+ void Transform(const vector<cv::Mat> & mat_vector,
+ Blob<Dtype>* transformed_blob);
+
+ /**
+ * @brief Applies the transformation defined in the data layer's
* transform_param block to a cv::Mat
*
* @param cv_img
}
template<typename Dtype>
+void DataTransformer<Dtype>::Transform(const vector<cv::Mat> & mat_vector,
+ Blob<Dtype>* transformed_blob) {
+ const int mat_num = mat_vector.size();
+ const int num = transformed_blob->num();
+ const int channels = transformed_blob->channels();
+ const int height = transformed_blob->height();
+ const int width = transformed_blob->width();
+
+ CHECK_GT(mat_num, 0) << "There is no mat to add";
+ CHECK_LE(mat_num, num) <<
+ "The size of mat_vector must be no greater than transformed_blob->num()";
+ Blob<Dtype> uni_blob(1, channels, height, width);
+ for (int item_id = 0; item_id < mat_num; ++item_id) {
+ int offset = transformed_blob->offset(item_id);
+ uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset);
+ Transform(mat_vector[item_id], &uni_blob);
+ }
+}
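For reference, a minimal caller-side sketch of this new overload (not part of the patch). It assumes an already-constructed DataTransformer, since the constructor arguments vary between Caffe versions; the TransformBatch helper and the float instantiation are illustrative names only.

#include <vector>

#include <opencv2/core/core.hpp>

#include "caffe/blob.hpp"
#include "caffe/data_transformer.hpp"

// Illustrative helper: transform a whole vector of images into one blob.
void TransformBatch(caffe::DataTransformer<float>* transformer,
                    const std::vector<cv::Mat>& images,
                    caffe::Blob<float>* dest) {
  // dest must already be shaped: dest->num() >= images.size(), and its
  // channels/height/width must match what the transformer will produce
  // (e.g. crop_size x crop_size when cropping is enabled).
  // Only the first images.size() items of dest are written.
  transformer->Transform(images, dest);
}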
+
+template<typename Dtype>
void DataTransformer<Dtype>::Transform(const cv::Mat& cv_img,
Blob<Dtype>* transformed_blob) {
const int img_channels = cv_img.channels();
}
template <typename Dtype>
+void MemoryDataLayer<Dtype>::AddMatVector(const vector<cv::Mat>& mat_vector,
+ const vector<int>& labels) {
+ CHECK(!has_new_data_) <<
+ "Can't add Mat when earlier ones haven't been consumed"
+ << " by the upper layers";
+ size_t num = mat_vector.size();
+ CHECK_GT(num, 0) << "There is no mat to add";
+ CHECK_LE(num, batch_size_) <<
+ "The number of added mat must be no greater than the batch size";
+
+ // Apply data transformations (mirror, scale, crop...)
+ this->data_transformer_.Transform(mat_vector, &added_data_);
+ // Copy Labels
+ Dtype* top_label = added_label_.mutable_cpu_data();
+ for (int item_id = 0; item_id < num; ++item_id) {
+ top_label[item_id] = labels[item_id];
+ }
+ // added_data_/added_label_ hold a full batch, so Reset() gets batch_size_ items.
+ Dtype* top_data = added_data_.mutable_cpu_data();
+ Reset(top_data, top_label, batch_size_);
+ has_new_data_ = true;
+}
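A usage sketch of the new entry point (again, not part of the patch): a stand-alone MemoryDataLayer fed from OpenCV images. The dimensions, file names, and variable names are placeholders, and the include path reflects data_layers.hpp, where MemoryDataLayer is declared in this tree.

#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

#include "caffe/blob.hpp"
#include "caffe/data_layers.hpp"
#include "caffe/proto/caffe.pb.h"

using caffe::Blob;
using caffe::LayerParameter;
using caffe::MemoryDataLayer;

int main() {
  // Configure a 2-image batch of 3x32x32 inputs (placeholder dimensions).
  LayerParameter param;
  caffe::MemoryDataParameter* mem_param = param.mutable_memory_data_param();
  mem_param->set_batch_size(2);
  mem_param->set_channels(3);
  mem_param->set_height(32);
  mem_param->set_width(32);

  MemoryDataLayer<float> layer(param);
  Blob<float> data, label;
  std::vector<Blob<float>*> bottom;  // MemoryDataLayer takes no bottom blobs
  std::vector<Blob<float>*> top;
  top.push_back(&data);   // transformed image data
  top.push_back(&label);  // labels
  layer.SetUp(bottom, top);

  // "a.jpg" / "b.jpg" are placeholder paths; the images must already match
  // the configured channels/height/width (resize beforehand if necessary).
  std::vector<cv::Mat> images;
  images.push_back(cv::imread("a.jpg"));
  images.push_back(cv::imread("b.jpg"));
  std::vector<int> labels;
  labels.push_back(0);
  labels.push_back(1);

  layer.AddMatVector(images, labels);  // size must not exceed batch_size
  layer.Forward(bottom, top);          // fills the data and label top blobs
  return 0;
}

AddMatVector must be called before Forward, since Forward reads the buffers that AddMatVector hands over via Reset().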
+
+template <typename Dtype>
void MemoryDataLayer<Dtype>::Reset(Dtype* data, Dtype* labels, int n) {
CHECK(data);
CHECK(labels);
const size_t count = this->channels_ * this->height_ * this->width_;
size_t pixel_index = 0;
for (int i = 0; i < this->batch_size_; ++i) {
- LOG(ERROR) << "i " << i;
datum_vector[i].set_channels(this->channels_);
datum_vector[i].set_height(this->height_);
datum_vector[i].set_width(this->width_);
}
}
+TYPED_TEST(MemoryDataLayerTest, AddMatVectorDefaultTransform) {
+ typedef typename TypeParam::Dtype Dtype;
+
+ LayerParameter param;
+ MemoryDataParameter* memory_data_param = param.mutable_memory_data_param();
+ memory_data_param->set_batch_size(this->batch_size_);
+ memory_data_param->set_channels(this->channels_);
+ memory_data_param->set_height(this->height_);
+ memory_data_param->set_width(this->width_);
+ MemoryDataLayer<Dtype> layer(param);
+ layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+ vector<cv::Mat> mat_vector(this->batch_size_);
+ vector<int> label_vector(this->batch_size_);
+ for (int i = 0; i < this->batch_size_; ++i) {
+ mat_vector[i] = cv::Mat(this->height_, this->width_, CV_8UC4);
+ label_vector[i] = i;
+ cv::randu(mat_vector[i], cv::Scalar::all(0), cv::Scalar::all(255));
+ }
+ layer.AddMatVector(mat_vector, label_vector);
+ int data_index;
+ const size_t count = this->channels_ * this->height_ * this->width_;
+ for (int iter = 0; iter < 5; ++iter) {
+ layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
+ const Dtype* data = this->data_blob_->cpu_data();
+ for (int i = 0; i < this->batch_size_; ++i) {
+ EXPECT_EQ(i, this->label_blob_->cpu_data()[i]);
+ for (int h = 0; h < this->height_; ++h) {
+ const unsigned char* ptr_mat = mat_vector[i].ptr<uchar>(h);
+ int index = 0;
+ for (int w = 0; w < this->width_; ++w) {
+ for (int c = 0; c < this->channels_; ++c) {
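+ // The blob is laid out NCHW, while each cv::Mat row is interleaved HWC,
+ // so ptr_mat[index] advances over (w, c) pairs.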
+ data_index = (i*count) + (c * this->height_ + h) * this->width_ + w;
+ Dtype pixel = static_cast<Dtype>(ptr_mat[index++]);
+ EXPECT_EQ(static_cast<int>(pixel),
+ data[data_index]);
+ }
+ }
+ }
+ }
+ }
+}
+
} // namespace caffe