From: Jerry Zhang
Date: Thu, 6 Dec 2018 19:14:48 +0000 (-0800)
Subject: Tensor construction codemod - 1/3 (#14828)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~2417
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=20d1bff2927fc95eaadbc94b78f29b8ea0171d54;p=platform%2Fupstream%2Fpytorch.git

Tensor construction codemod - 1/3 (#14828)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14828

Codemod generated with clangr shard mode, 25 files per diff,
motivation: https://github.com/pytorch/pytorch/pull/12407
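Every hunk below applies the same rewrite: instead of fetching an output
tensor with no metadata and shaping it later with Resize(), the sizes and
dtype are handed to Output() at construction time. A minimal before/after
sketch of the pattern, distilled from the hunks in this diff (M, N, T, and
y_data are placeholders for whatever the surrounding operator code provides;
the two snippets are alternatives, not one body):

    // Before: Output(0) returns an unshaped tensor; Resize() attaches the
    // shape afterwards, and the dtype is only fixed by the first call to
    // mutable_data<T>().
    auto* Y = Output(0);
    Y->Resize(M, N);
    T* y_data = Y->template mutable_data<T>();

    // After: sizes and dtype are supplied up front, so the tensor is
    // constructed once with its final metadata.
    auto* Y = Output(0, {M, N}, at::dtype<T>());
    T* y_data = Y->template mutable_data<T>();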
Reviewed By: bddppq

Differential Revision: D13335160

fbshipit-source-id: a3ae4c5a86bfbdaf2d5aa14e0eef57255e829fd4
---

diff --git a/caffe2/experiments/operators/fully_connected_op_decomposition.h b/caffe2/experiments/operators/fully_connected_op_decomposition.h
index 955177a..432189d 100644
--- a/caffe2/experiments/operators/fully_connected_op_decomposition.h
+++ b/caffe2/experiments/operators/fully_connected_op_decomposition.h
@@ -43,7 +43,7 @@ class FullyConnectedOpDecomp final : public Operator<Context> {
     const auto& U = Input(1);
     const auto& V = Input(2);
     const auto& b = Input(3);
-    auto* Y = Output(0);
+
     //auto* buffer_ptr = Output(1);
     // Size M * middle;
     //auto& multi_buffer_ = *buffer_ptr;
@@ -64,15 +64,17 @@ class FullyConnectedOpDecomp final : public Operator<Context> {
     int middle = U.dim32(0);
     CAFFE_ENFORCE_EQ(K, V.dim32(0));
     CAFFE_ENFORCE_EQ(N, b.dim32(0));
+    std::vector<int64_t> dims;
     if (X.dim() > 1) {
-      Y->Resize(M, N);
+      dims = {M, N};
       multi_buffer_.Resize(M, middle);
     } else {
-      Y->Resize(N);
+      dims = {N};
       multi_buffer_.Resize(middle);
     }
-    // The col buffer is stored in CHW order as well - kernel_dim, and the height
-    // and width.
+    auto* Y = Output(0, dims, at::dtype<T>());
+    // The col buffer is stored in CHW order as well - kernel_dim, and the
+    // height and width.
     // multi_buffer_.Resize(M, middle);
     T* multi_buffer_data = multi_buffer_.template mutable_data<T>();
     // X * V * tans(U)
@@ -138,10 +140,10 @@ class FullyConnectedDecompGradientOp : public Operator<Context> {
     }
     auto* dU = Output(0);
     auto* dV = Output(1);
-    auto* db = Output(2);
+
     dU->ResizeLike(U);
     dV->ResizeLike(V);
-    db->Resize(N);
+    auto* db = Output(2, {N}, at::dtype<T>());
 
     // Compute dU
     // first compute X * V
diff --git a/caffe2/experiments/operators/fully_connected_op_prune.h b/caffe2/experiments/operators/fully_connected_op_prune.h
index 4bd7fc0..179fb676 100644
--- a/caffe2/experiments/operators/fully_connected_op_prune.h
+++ b/caffe2/experiments/operators/fully_connected_op_prune.h
@@ -135,7 +135,7 @@ namespace caffe2 {
       const auto& W = Input(1);
      const auto& Mask = Input(2);
      const auto& b = Input(3);
-      auto* Y = Output(0);
+
      CAFFE_ENFORCE_GE(X.dim(), 1);
      CAFFE_ENFORCE_GE(W.dim(), 2);
      if (X.dim() > 2 || W.dim() > 2) {
@@ -151,11 +151,13 @@ namespace caffe2 {
      int N = W.dim32(0);
      CAFFE_ENFORCE_EQ(K, W.numel() / W.dim32(0));
      CAFFE_ENFORCE_EQ(N, b.dim32(0));
+      std::vector<int64_t> dims;
      if (X.dim() > 1) {
-        Y->Resize(M, N);
+        dims = {M, N};
      } else {
-        Y->Resize(N);
+        dims = {N};
      }
+      auto* Y = Output(0, dims, at::dtype<T>());
      // W * x
      math::Gemm<T, Context, Engine>(
          CblasNoTrans, CblasTrans, M, N, K, 1, X.template data<T>(),
@@ -176,8 +178,7 @@ namespace caffe2 {
          bias_multiplier_.template data<T>(), b.template data<T>(), 1,
          Y->template mutable_data<T>(), &context_);
      if (OutputSize() == 2){
-        auto* Comp_rate = Output(1);
-        Comp_rate->Resize(vector<int64_t>());
+        auto* Comp_rate = Output(1, vector<int64_t>(), at::dtype<T>());
        T* comp_data = Comp_rate->template mutable_data<T>();
        math::Sum<T, Context>(
            Mask.numel(), Mask.template data<T>(), comp_data, &context_);
@@ -251,9 +252,9 @@ namespace caffe2 {
        DCHECK_EQ(N, dY.numel());
      }
      auto* dW = Output(0);
-      auto* db = Output(1);
+
      dW->ResizeLike(W);
-      db->Resize(N);
+      auto* db = Output(1, {N}, at::dtype<T>());
 
      // Compute dW
      math::Gemm<T, Context, Engine>(
diff --git a/caffe2/experiments/operators/fully_connected_op_sparse.h b/caffe2/experiments/operators/fully_connected_op_sparse.h
index 2f66822..a1d91d4 100644
--- a/caffe2/experiments/operators/fully_connected_op_sparse.h
+++ b/caffe2/experiments/operators/fully_connected_op_sparse.h
@@ -106,7 +106,7 @@ class FullyConnectedOp_SPARSE final : public Operator<Context> {
     const auto& jw = Input(3);
     // Notice that we do not need to transpose b
     const auto& b = Input(4);
-    auto* Yt = Output(0); // transposed Y
+    // transposed Y
     // here we assume X is k-by-m
     CAFFE_ENFORCE_EQ(Xt.dim(), 2);
     CAFFE_ENFORCE_EQ(b.dim(), 1);
@@ -117,7 +117,7 @@ class FullyConnectedOp_SPARSE final : public Operator<Context> {
     // number of outputs.
     int N = iw.dim32(0)-1;
     CAFFE_ENFORCE_EQ(N, b.dim32(0));
-    Yt->Resize(shape(N, M));
+    auto* Yt = Output(0, shape(N, M), at::dtype<T>());
     // Y' = W * X';
     Sparse_mm<T, Context>(
diff --git a/caffe2/experiments/operators/funhash_op.h b/caffe2/experiments/operators/funhash_op.h
index 4ea41ad..0ccfeac 100644
--- a/caffe2/experiments/operators/funhash_op.h
+++ b/caffe2/experiments/operators/funhash_op.h
@@ -75,8 +75,7 @@ class FunHashOp : public Operator<Context> {
       ++n_segments;
     }
 
-    auto* output = Output(0);
-    output->Resize(n_segments, num_outputs_);
+    auto* output = Output(0, {n_segments, num_outputs_}, at::dtype<T>());
 
     T* output_data = output->template mutable_data<T>();
 
diff --git a/caffe2/experiments/operators/sparse_funhash_op.h b/caffe2/experiments/operators/sparse_funhash_op.h
index b6d240c..e8a1cba 100644
--- a/caffe2/experiments/operators/sparse_funhash_op.h
+++ b/caffe2/experiments/operators/sparse_funhash_op.h
@@ -74,8 +74,7 @@ class SparseFunHashOp : public Operator<Context> {
       ++n_segments;
     }
 
-    auto* output = Output(0);
-    output->Resize(n_segments, num_outputs_);
+    auto* output = Output(0, {n_segments, num_outputs_}, at::dtype<T>());
 
     T* output_data = output->template mutable_data<T>();
 
@@ -176,12 +175,11 @@ class SparseFunHashGradientOp : public Operator<Context> {
     int64_t num_nz_ent = seg.size(0);
 
     int64_t grad_weight_size = num_nz_ent * num_outputs_ * num_alpha;
-    auto* grad_weight_val = Output(0);
-    grad_weight_val->Resize(grad_weight_size);
+
+    auto* grad_weight_val = Output(0, {grad_weight_size}, at::dtype<T>());
     T* grad_weight_val_data = grad_weight_val->template mutable_data<T>();
-    auto* grad_weight_ind = Output(1);
-    grad_weight_ind->Resize(grad_weight_size);
+    auto* grad_weight_ind = Output(1, {grad_weight_size}, at::dtype<int64_t>());
     auto* grad_weight_ind_data = grad_weight_ind->template mutable_data<int64_t>();
 
diff --git a/caffe2/experiments/operators/sparse_matrix_reshape_op.h b/caffe2/experiments/operators/sparse_matrix_reshape_op.h
index 89e96b3..e48665f 100644
--- a/caffe2/experiments/operators/sparse_matrix_reshape_op.h
+++ b/caffe2/experiments/operators/sparse_matrix_reshape_op.h
@@ -101,10 +101,9 @@ class SparseMatrixReshapeOp : public Operator<Context> {
     CAFFE_ENFORCE(
         old_row.numel() == nnz,
         "Column and row tensors must have the same size.");
-    auto* new_col = Output(0);
-    auto* new_row = Output(1);
-    new_col->Resize(nnz);
-    new_row->Resize(nnz);
+
+    auto* new_col = Output(0, {nnz}, at::dtype<int64_t>());
+    auto* new_row = Output(1, {nnz}, at::dtype<int>());
 
     const auto* old_col_data = old_col.template data<int64_t>();
     const auto* old_row_data = old_row.template data<int>();
diff --git a/caffe2/experiments/operators/tt_contraction_op.h b/caffe2/experiments/operators/tt_contraction_op.h
index af28779..73865d1 100644
--- a/caffe2/experiments/operators/tt_contraction_op.h
+++ b/caffe2/experiments/operators/tt_contraction_op.h
@@ -40,7 +40,6 @@ class TTContractionOp final : public Operator<Context> {
   bool RunOnDevice() override {
     const auto& A = Input(0);
     const auto& B = Input(1);
-    auto* C = Output(0);
 
     CAFFE_ENFORCE(A.dim() == 2, A.dim());
 
@@ -58,7 +57,7 @@ class TTContractionOp final : public Operator<Context> {
     int64_t D_ = B_size / (K_ * N_);
 
     int64_t C_size = D_ * M_ * N_;
-    C->Resize(vector<int64_t>{C_size});
+    auto* C = Output(0, vector<int64_t>{C_size}, at::dtype<T>());
 
     int64_t B_stride = K_ * N_;
     int64_t C_stride = M_ * N_;
@@ -103,16 +102,14 @@ class TTContractionGradientOp final : public Operator<Context> {
     const auto& G = Input(0);
     const auto& A = Input(1);
     const auto& B = Input(2);
-    auto* dA = Output(0);
-    auto* dB = Output(1);
 
     int64_t G_size = G.numel();
     int64_t D_ = G_size / (M_ * N_);
 
     int64_t dB_size = D_ * K_ * N_;
 
-    dA->Resize(A.sizes());
-    dB->Resize(B.sizes());
+    auto* dA = Output(0, A.sizes(), at::dtype<T>());
+    auto* dB = Output(1, B.sizes(), at::dtype<T>());
 
     int64_t B_stride = K_ * N_;
     int64_t G_stride = M_ * N_;
diff --git a/caffe2/experiments/operators/tt_pad_op.h b/caffe2/experiments/operators/tt_pad_op.h
index e25159d..b482e38 100644
--- a/caffe2/experiments/operators/tt_pad_op.h
+++ b/caffe2/experiments/operators/tt_pad_op.h
@@ -44,8 +44,7 @@ class TTPadOp final : public Operator<Context> {
     auto X_dim0 = X.size(0);
     auto X_dim1 = X.size(1);
 
-    auto* X_orig_dim0 = Output(1);
-    X_orig_dim0->Resize(1);
+    auto* X_orig_dim0 = Output(1, {1}, at::dtype<int64_t>());
     *X_orig_dim0->template mutable_data<int64_t>() = X_dim0;
 
     if (X_dim0 % scale_ != 0) {