// Backward pass of Im2colLayer on the GPU.
//
// NOTE(review): the GPU implementation is not finished. This stub logs a
// warning and falls back to the CPU path on every call; callers get correct
// gradients, just without GPU acceleration. The abandoned col2im-based draft
// is kept below as a comment — it is itself incomplete (the call that should
// precede the `WIDTH_, KSIZE_, STRIDE_, ...` argument list is missing from
// this chunk), so do not uncomment it as-is.
//
// (This hunk previously carried raw unified-diff `+`/`-` markers from an
// unapplied patch; the patch has been applied here.)
template <typename Dtype>
Dtype Im2colLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  LOG(ERROR) << "Warning: still CPU version";
  return Backward_cpu(top, propagate_down, bottom);
  /*
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
  for (int n = 0; n < top[0]->num(); ++n) {
    WIDTH_, KSIZE_, STRIDE_, bottom_diff + (*bottom)[0]->offset(n));
  }
  return Dtype(0.);
  */
}

INSTANTIATE_CLASS(Im2colLayer);
// NOTE(review): truncated fragment — the opening of this TYPED_TEST (the
// layer construction and parameter setup) fell outside this chunk; `layer`
// and the blob vectors are defined by those missing lines / the test fixture.
// Gradient check with step size 1e-2 and relative tolerance 1e-2.
GradientChecker<TypeParam> checker(1e-2, 1e-2);
checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
}
-/*
+
TYPED_TEST(Im2colLayerTest, TestGPUGradient) {
LayerParameter layer_param;
layer_param.set_kernelsize(3);
GradientChecker<TypeParam> checker(1e-2, 1e-2);
checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
}
-*/
+
}
double* data_col);
// The col2im / double-precision atomic-add draft below was disabled by the
// original author by wrapping it in a block comment. (This hunk previously
// carried raw unified-diff `+`/`-` markers; the patch has been applied here,
// so the entire region is now inert comment text.) The draft is visibly
// truncated — `MyAtomicAdd`'s body and the kernel bodies are missing from
// this chunk — so it must not be uncommented as-is. Note: hardware
// atomicAdd(double*) requires SM60+, which is why a CAS-based emulation
// (the __longlong_as_double remnant) was being drafted.
/*
// A bunch of stuff dealing with double atomic add
template <typename Dtype>
__device__ inline Dtype MyAtomicAdd(Dtype* address, Dtype val);
return __longlong_as_double(old);
}
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int ksize,
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int psize, const int stride,
double* data_im);
*/
} // namespace caffeine
const int height, const int width, const int ksize, const int stride,
Dtype* data_col);
// col2im_gpu declaration disabled (its .cu implementation was commented out
// in the same change). (This hunk previously carried raw unified-diff
// `+`/`-` markers; the patch has been applied here.)
/*
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
    const int height, const int width, const int psize, const int stride,
    Dtype* data_im);
*/
} // namespace caffeine
#endif // CAFFEINE_UTIL_IM2COL_HPP_