From 61cc701dd7b02237e75062ae5238d9265a55bf34 Mon Sep 17 00:00:00 2001
From: Dmytro Dzhulgakov
Date: Fri, 21 Dec 2018 08:13:15 -0800
Subject: [PATCH] Fix cudnn dropout (#15473)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15473

Revert accidental changes introduced in D13335176.

IntList is a range, and copying it only copies the underlying pointers. As a
result, the cached pointers would point either to deallocated memory or to the
same memory as the input, so the equality check would always pass.

Reviewed By: ezyang

Differential Revision: D13537131

fbshipit-source-id: c97b3533be689bb4cdadd9e612f1284ac50e4bda
---
 caffe2/operators/dropout_op_cudnn.cc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/caffe2/operators/dropout_op_cudnn.cc b/caffe2/operators/dropout_op_cudnn.cc
index 2387b7b..ffec6b1 100644
--- a/caffe2/operators/dropout_op_cudnn.cc
+++ b/caffe2/operators/dropout_op_cudnn.cc
@@ -55,7 +55,7 @@ class CuDNNDropoutOp final : public Operator<CUDAContext> {
   cudnnTensorDescriptor_t data_desc_;
   cudnnDropoutDescriptor_t dropout_desc_;
 
-  at::IntList cudnn_input_dims_;
+  vector<int64_t> cudnn_input_dims_;
 
   float ratio_;
   bool is_test_;
@@ -113,7 +113,7 @@ class CuDNNDropoutGradientOp final : public Operator<CUDAContext> {
   cudnnTensorDescriptor_t data_desc_;
   cudnnDropoutDescriptor_t dropout_desc_;
 
-  at::IntList cudnn_input_dims_;
+  vector<int64_t> cudnn_input_dims_;
 
   Blob* scratch_blob_;
 
@@ -150,7 +150,7 @@ bool CuDNNDropoutOp::DoRunWithType() {
   if (X.sizes() != cudnn_input_dims_) {
     CAFFE_ENFORCE(scratch_blob_);
     Tensor* states = BlobGetMutableTensor(scratch_blob_, CUDA);
-    cudnn_input_dims_ = X.sizes();
+    cudnn_input_dims_ = X.sizes().vec();
     CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
         data_desc_,
         GetCudnnTensorFormat(StorageOrder::NCHW),
@@ -246,7 +246,7 @@ bool CuDNNDropoutGradientOp::DoRunWithType() {
   }
 
   if (dY.sizes() != cudnn_input_dims_) {
-    cudnn_input_dims_ = dY.sizes();
+    cudnn_input_dims_ = dY.sizes().vec();
     CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
         data_desc_,
         GetCudnnTensorFormat(StorageOrder::NCHW),
-- 
2.7.4
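
Note (not part of the patch): the summary's point is that at::IntList is an
ArrayRef, i.e. a non-owning {pointer, length} view, so caching it as a member
stores aliasing or dangling pointers rather than the dimension values. Below
is a minimal standalone C++ sketch of that failure mode; TinyArrayRef and
FakeTensor are hypothetical stand-ins for at::IntList and a Caffe2 tensor,
not real ATen/Caffe2 types.

    // Sketch only: illustrates why caching a non-owning view of sizes()
    // makes the "shape changed?" comparison always succeed.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct TinyArrayRef {                 // stand-in for at::IntList
      const int64_t* data = nullptr;      // non-owning pointer
      size_t size = 0;
      bool operator==(const TinyArrayRef& other) const {
        if (size != other.size) return false;
        for (size_t i = 0; i < size; ++i)
          if (data[i] != other.data[i]) return false;
        return true;
      }
    };

    struct FakeTensor {                   // stand-in for a tensor
      std::vector<int64_t> dims;
      // sizes() returns a view into the tensor's own dims storage.
      TinyArrayRef sizes() const { return {dims.data(), dims.size()}; }
    };

    int main() {
      FakeTensor X{{2, 3, 4, 5}};

      // Buggy pattern: the cached member is only a view. It aliases X's own
      // dims buffer, so the comparison checks X against itself and never
      // detects a shape change.
      TinyArrayRef cached_view = X.sizes();
      X.dims[0] = 8;  // shape really changes, buffer stays the same
      std::printf("view-based check sees change: %s\n",
                  X.sizes() == cached_view ? "no" : "yes");  // prints "no"

      // Fixed pattern, analogous to X.sizes().vec() in the patch: copy the
      // dimension values into an owning vector before caching them.
      std::vector<int64_t> cached_copy(X.sizes().data,
                                       X.sizes().data + X.sizes().size);
      X.dims[0] = 16;
      std::printf("vector-based check sees change: %s\n",
                  cached_copy == X.dims ? "no" : "yes");  // prints "yes"
      return 0;
    }

This is why the fix swaps the member type to vector<int64_t> and assigns
X.sizes().vec(), which copies the values instead of the pointers.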