// src/caffe/layers/hdf5_data_layer.cu
1 // Copyright Sergey Karayev 2014
2 /*
3 TODO:
4 - only load parts of the file, in accordance with a prototxt param "max_mem"
5 */
6
7 #include <stdint.h>
8 #include <string>
9 #include <vector>
10
11 #include "hdf5.h"
12 #include "hdf5_hl.h"
13
14 #include "caffe/layer.hpp"
15 #include "caffe/util/io.hpp"
16 #include "caffe/vision_layers.hpp"
17
18 using std::string;
19
20 namespace caffe {
21
22 template <typename Dtype>
template <typename Dtype>
void HDF5DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  // Emit one batch: copy `batch_size` consecutive rows of the host-side HDF5
  // buffers (data_, label_) into the data top (*top)[0] and label top
  // (*top)[1], wrapping back to row 0 when the dataset is exhausted.
  const int batch_size = this->layer_param_.batchsize();
  // Per-row element counts, derived from the top blobs' shapes.
  const int data_size = (*top)[0]->count() / (*top)[0]->num();
  const int label_size = (*top)[1]->count() / (*top)[1]->num();

  for (int item = 0; item < batch_size; ++item, ++current_row_) {
    // Wrap around: cycle through the dataset indefinitely.
    if (current_row_ == data_dims_[0]) {
      current_row_ = 0;
    }

    // NOTE(review): one synchronous H2D memcpy per row per blob; batching
    // contiguous rows into a single copy would cut transfer overhead, but
    // would change the wrap-around handling — left as-is.
    Dtype* data_dst = &(*top)[0]->mutable_gpu_data()[item * data_size];
    const Dtype* data_src = &data_.get()[current_row_ * data_size];
    CUDA_CHECK(cudaMemcpy(data_dst, data_src,
            sizeof(Dtype) * data_size, cudaMemcpyHostToDevice));

    Dtype* label_dst = &(*top)[1]->mutable_gpu_data()[item * label_size];
    const Dtype* label_src = &label_.get()[current_row_ * label_size];
    CUDA_CHECK(cudaMemcpy(label_dst, label_src,
            sizeof(Dtype) * label_size, cudaMemcpyHostToDevice));
  }
}
47
template <typename Dtype>
Dtype HDF5DataLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  // A data layer has no bottom blobs to back-propagate into, so the
  // backward pass is a no-op that contributes zero loss.
  return Dtype(0);
}
53
54 INSTANTIATE_CLASS(HDF5DataLayer);
55
56 }  // namespace caffe