From: Jerry Zhang
Date: Thu, 20 Dec 2018 05:34:36 +0000 (-0800)
Subject: Tensor construction codemod(ResizeLike) - 1/7 (#15073)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~2156
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3fc889e9763c8224af2aff1f4e0cc75c289bd2bc;p=platform%2Fupstream%2Fpytorch.git

Tensor construction codemod(ResizeLike) - 1/7 (#15073)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15073

Codemod generated with clangr shard mode, 25 files per diff,
motivation: https://github.com/pytorch/pytorch/pull/12407

Reviewed By: dzhulgakov

Differential Revision: D13419563

fbshipit-source-id: 8c284405fa3a867303216df876ee6b20d8a46551
---
diff --git a/caffe2/cuda_rtc/pool_op_rtc_gpu.cc b/caffe2/cuda_rtc/pool_op_rtc_gpu.cc
index 564a469..c27e7e5 100644
--- a/caffe2/cuda_rtc/pool_op_rtc_gpu.cc
+++ b/caffe2/cuda_rtc/pool_op_rtc_gpu.cc
@@ -257,8 +257,8 @@ class MaxPoolGradientRTCOp final : public ConvPoolOpBase<CUDAContext> {
     auto& Y = Input(1);
     auto& dY = Input(2);
     CAFFE_ENFORCE_EQ(dY.dim(), 4);
-    auto* dX = Output(0);
-    dX->ResizeLike(X);
+
+    auto* dX = Output(0, X.sizes(), at::dtype<float>());
     ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(2), X.dim32(3)});
     if (input_dims_ != X.sizes()) {
       VLOG(1) << "MaxPoolGradient RTC recompiling";
diff --git a/caffe2/experiments/operators/fully_connected_op_decomposition.h b/caffe2/experiments/operators/fully_connected_op_decomposition.h
index 432189d..4af9ac9 100644
--- a/caffe2/experiments/operators/fully_connected_op_decomposition.h
+++ b/caffe2/experiments/operators/fully_connected_op_decomposition.h
@@ -138,11 +138,9 @@ class FullyConnectedDecompGradientOp : public Operator<Context> {
       DCHECK_EQ(X.dim(), 1);
       DCHECK_EQ(N, dY.numel());
     }
 
-    auto* dU = Output(0);
-    auto* dV = Output(1);
-    dU->ResizeLike(U);
-    dV->ResizeLike(V);
+    auto* dU = Output(0, U.sizes(), at::dtype<T>());
+    auto* dV = Output(1, V.sizes(), at::dtype<T>());
     auto* db = Output(2, {N}, at::dtype<T>());
 
     // Compute dU
@@ -189,8 +187,7 @@ class FullyConnectedDecompGradientOp : public Operator<Context> {
         &context_);
     // Compute dX if necessary.
     if (OutputSize() == 4) {
-      auto* dX = Output(3);
-      dX->ResizeLike(X);
+      auto* dX = Output(3, X.sizes(), at::dtype<T>());
       dx_buffer_.Resize(M, middle);
       T* dx_buffer_data = dx_buffer_.template mutable_data<T>();
       math::Gemm(
diff --git a/caffe2/experiments/operators/fully_connected_op_prune.h b/caffe2/experiments/operators/fully_connected_op_prune.h
index 179fb676..462ada3 100644
--- a/caffe2/experiments/operators/fully_connected_op_prune.h
+++ b/caffe2/experiments/operators/fully_connected_op_prune.h
@@ -220,7 +220,7 @@ namespace caffe2 {
         auto* Ag_dW_ptr = Output(4);
         auto& Ag_dW = *Ag_dW_ptr;
         // it is also the Input(5)
-        auto* mask_seq_auto = Output(5);
+
         // how about get threshold
         auto& thres = Input(6);
         //TODO(wyiming): check comp_lb is a float
@@ -251,9 +251,8 @@ namespace caffe2 {
          DCHECK_EQ(X.dim(), 1);
          DCHECK_EQ(N, dY.numel());
        }
 
-        auto* dW = Output(0);
-        dW->ResizeLike(W);
+        auto* dW = Output(0, W.sizes(), at::dtype<T>());
        auto* db = Output(1, {N}, at::dtype<T>());
 
        // Compute dW
@@ -292,7 +291,7 @@ namespace caffe2 {
            Ag_dW.template mutable_data<T>(),
            sum_buffer_.template mutable_data<T>(),
            &context_);
-        mask_seq_auto->ResizeLike(W);
+        auto* mask_seq_auto = Output(5, W.sizes(), at::dtype<T>());
        T* mask_seq = mask_seq_auto->template mutable_data<T>();
        math::Set(N*K, static_cast<T>(0),
            mask_seq_auto->template mutable_data<T>(), &context_);
@@ -338,8 +337,7 @@ namespace caffe2 {
            &context_);
        // Compute dX if necessary.
       if (OutputSize() == 7) {
-          auto* dX = Output(6);
-          dX->ResizeLike(X);
+          auto* dX = Output(6, X.sizes(), at::dtype<T>());
          math::Gemm(
              CblasNoTrans, CblasNoTrans, M, K, N, 1,
              dY.template data<T>(), W.template data<T>(),
diff --git a/caffe2/experiments/operators/funhash_op.h b/caffe2/experiments/operators/funhash_op.h
index 0ccfeac..b07ca68 100644
--- a/caffe2/experiments/operators/funhash_op.h
+++ b/caffe2/experiments/operators/funhash_op.h
@@ -164,8 +164,8 @@ class FunHashGradientOp : public Operator<Context> {
     if (adaptive_) {
       const auto& alpha = Input(5);
       num_alpha = alpha.size(0);
-      auto* grad_alpha = Output(1);
-      grad_alpha->ResizeLike(alpha);
+
+      auto* grad_alpha = Output(1, alpha.sizes(), at::dtype<T>());
       grad_alpha_data = grad_alpha->template mutable_data<T>();
       memset(grad_alpha_data, 0, sizeof(T) * num_alpha);
     }
@@ -175,8 +175,7 @@ class FunHashGradientOp : public Operator<Context> {
     int64_t num_weight = weight.size(0);
     int64_t num_nz_ent = seg.size(0);
 
-    auto* grad_weight = Output(0);
-    grad_weight->ResizeLike(weight);
+    auto* grad_weight = Output(0, weight.sizes(), at::dtype<T>());
     T* grad_weight_data = grad_weight->template mutable_data<T>();
     const auto* grad_out_data = grad_out.template data<T>();
 
diff --git a/caffe2/experiments/operators/sparse_funhash_op.h b/caffe2/experiments/operators/sparse_funhash_op.h
index e8a1cba..5860331 100644
--- a/caffe2/experiments/operators/sparse_funhash_op.h
+++ b/caffe2/experiments/operators/sparse_funhash_op.h
@@ -163,8 +163,8 @@ class SparseFunHashGradientOp : public Operator<Context> {
     if (adaptive_) {
       const auto& alpha = Input(5);
       num_alpha = alpha.size(0);
-      auto* grad_alpha = Output(2);
-      grad_alpha->ResizeLike(alpha);
+
+      auto* grad_alpha = Output(2, alpha.sizes(), at::dtype<T>());
       grad_alpha_data = grad_alpha->template mutable_data<T>();
       memset(grad_alpha_data, 0, sizeof(T) * num_alpha);
     }
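
Every hunk in this diff applies the same mechanical rewrite: the lazy two-step construction (Output(i) followed by ResizeLike(src)) becomes a single Output(i, src.sizes(), at::dtype<T>()) call, so the output tensor receives its shape and data type at construction time rather than being resized after the fact. The sketch below shows what a call site looks like after the codemod; it is a minimal illustration only, assuming a hypothetical CopyLikeOp CPU operator with a float element type, none of which comes from this diff.

#include <algorithm>

#include "caffe2/core/operator.h"

namespace caffe2 {

// Illustrative sketch, not part of this change: a trivial CPU operator whose
// single output is constructed with the shape of its input via the
// post-codemod Output(index, sizes, options) overload.
class CopyLikeOp final : public Operator<CPUContext> {
 public:
  USE_OPERATOR_FUNCTIONS(CPUContext);
  CopyLikeOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<CPUContext>(operator_def, ws) {}

  bool RunOnDevice() override {
    const auto& X = Input(0);
    // One-step construction: replaces Output(0) followed by Y->ResizeLike(X).
    auto* Y = Output(0, X.sizes(), at::dtype<float>());
    const float* x = X.data<float>();
    float* y = Y->mutable_data<float>();
    std::copy(x, x + X.numel(), y);
    return true;
  }
};

} // namespace caffe2

The one-step form keeps shape, dtype, and allocation together at the point where the output is declared; see the motivation link above (https://github.com/pytorch/pytorch/pull/12407) for the broader context.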