replace softmax cudaMemcpy with caffe_gpu_copy
author Evan Shelhamer <shelhamer@imaginarynumber.net>
Sat, 28 Jun 2014 01:38:39 +0000 (18:38 -0700)
committer Evan Shelhamer <shelhamer@imaginarynumber.net>
Fri, 4 Jul 2014 00:14:11 +0000 (17:14 -0700)
src/caffe/layers/softmax_layer.cu

index f53883c..5ec4412 100644
@@ -50,8 +50,7 @@ Dtype SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   Dtype* scale_data = scale_.mutable_gpu_data();
   int num = bottom[0]->num();
   int dim = bottom[0]->count() / bottom[0]->num();
-  CUDA_CHECK(cudaMemcpy(top_data, bottom_data,
-      sizeof(Dtype) * bottom[0]->count(), cudaMemcpyDeviceToDevice));
+  caffe_gpu_copy(bottom[0]->count(), bottom_data, top_data);
   // we need to subtract the max to avoid numerical issues, compute the exp,
   // and then normalize.
   // Compute max
@@ -85,8 +84,7 @@ void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   int num = top[0]->num();
   int dim = top[0]->count() / top[0]->num();
-  CUDA_CHECK(cudaMemcpy(bottom_diff, top_diff,
-      sizeof(Dtype) * top[0]->count(), cudaMemcpyDeviceToDevice));
+  caffe_gpu_copy(top[0]->count(), top_diff, bottom_diff);
   // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff
   // cuda dot returns the result to cpu, so we temporarily change the pointer
   // mode
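
Note: the change swaps a raw, byte-counted cudaMemcpy for the typed, element-counted caffe_gpu_copy helper, so call sites no longer spell out sizeof(Dtype) arithmetic or the cudaMemcpyDeviceToDevice flag. Below is a minimal sketch of what such a wrapper could look like if built directly on cudaMemcpy; this is an assumption for illustration, not the verbatim Caffe implementation (which may instead dispatch to cuBLAS copy routines).

  #include <cuda_runtime.h>

  // Hypothetical sketch of caffe_gpu_copy: copy N elements of type Dtype
  // between two device buffers. CUDA_CHECK is Caffe's error-checking macro,
  // as seen in the removed lines above.
  template <typename Dtype>
  void caffe_gpu_copy(const int N, const Dtype* X, Dtype* Y) {
    if (X != Y) {
      // Both pointers live in GPU memory, hence cudaMemcpyDeviceToDevice.
      CUDA_CHECK(cudaMemcpy(Y, X, sizeof(Dtype) * N,
          cudaMemcpyDeviceToDevice));
    }
  }

With a signature like this, each of the two copies in the diff collapses to a single line, and the byte-size computation lives in one place instead of being repeated at every call site.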