static_cast<const unsigned int*>(rand_vec_->gpu_data());
const int count = (*bottom)[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
- DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+ DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
+ CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
&(this->blob_top_vec_));
}
+// Gradient check for DropoutLayer on CPU in TEST phase.
+// NOTE(review): presumably dropout is a deterministic pass-through (no
+// random masking) in TEST phase, which is why an exhaustive element-wise
+// check is affordable here — confirm against DropoutLayer::Forward_cpu.
+TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPUTest) {
+  LayerParameter layer_param;
+  Caffe::set_mode(Caffe::CPU);
+  Caffe::set_phase(Caffe::TEST);
+  DropoutLayer<TypeParam> layer(layer_param);
+  // Finite-difference step 1e-2 with relative tolerance 1e-3.
+  GradientChecker<TypeParam> checker(1e-2, 1e-3);
+  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
+      &(this->blob_top_vec_));
+}
TYPED_TEST(NeuronLayerTest, TestDropoutCPUTestPhase) {
LayerParameter layer_param;
}
}
+// Gradient check for DropoutLayer on GPU in TEST phase.
+// Guarded on compute capability >= 2.0; on older devices the test is
+// skipped (with a log message) rather than run.
+TYPED_TEST(NeuronLayerTest, TestDropoutGradientGPUTest) {
+  if (CAFFE_TEST_CUDA_PROP.major >= 2) {
+    LayerParameter layer_param;
+    Caffe::set_mode(Caffe::GPU);
+    Caffe::set_phase(Caffe::TEST);
+    DropoutLayer<TypeParam> layer(layer_param);
+    // Finite-difference step 1e-2 with relative tolerance 1e-3.
+    GradientChecker<TypeParam> checker(1e-2, 1e-3);
+    // it is too expensive to call curand multiple times, so we don't do an
+    // exhaustive gradient check.
+    checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
+        &(this->blob_top_vec_));
+  } else {
+    LOG(ERROR) << "Skipping test to spare my laptop.";
+  }
+}
+
TYPED_TEST(NeuronLayerTest, TestDropoutGPUTestPhase) {
LayerParameter layer_param;