From: Brennan Vincent
Date: Mon, 4 Mar 2019 06:13:27 +0000 (-0800)
Subject: Don't make factory methods create a tensor and then immediately copy it (#17565)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~1000
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6a297b86754324d850d1595947c5e5559b51acc2;p=platform%2Fupstream%2Fpytorch.git

Don't make factory methods create a tensor and then immediately copy it (#17565)

Summary:
Create a `make_variable` override that moves out of a tensor instead of going through `shallow_copy_and_detach`.

Call this override from factory methods like `empty` that create a brand new tensor, do nothing with it, and then copy it into a variable.

Will update this with actual numbers, but it seems to get rid of around 20-40% of the overhead of calling `torch.empty(0)`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/17565

Differential Revision: D14266130

Pulled By: umanwizard

fbshipit-source-id: f57d5f2ca3f80ee8ee96d50f905e852fd10db941
---

diff --git a/tools/autograd/gen_variable_factories.py b/tools/autograd/gen_variable_factories.py
index b193fcc..fea70a3 100644
--- a/tools/autograd/gen_variable_factories.py
+++ b/tools/autograd/gen_variable_factories.py
@@ -13,7 +13,7 @@ inline at::Tensor ${name}(${formals}) {
   ${pre_record_trace}
   at::Tensor tensor = at::${name}(${actuals});
   at::Tensor result =
-      autograd::make_variable(tensor, /*requires_grad=*/${requires_grad});
+      autograd::make_variable_consuming(std::move(tensor), /*requires_grad=*/${requires_grad});
   ${post_record_trace}
   return result;
 }
diff --git a/torch/csrc/autograd/variable.h b/torch/csrc/autograd/variable.h
index 7eea2b0..e726f9c 100644
--- a/torch/csrc/autograd/variable.h
+++ b/torch/csrc/autograd/variable.h
@@ -104,7 +104,8 @@ struct TORCH_API Variable : public at::Tensor {
       bool allow_tensor_metadata_change,
       Edge gradient_edge);
 
-  /// Creates a `Variable` from the given `Tensor`. `requires_grad` should be
+  /// Creates a `Variable` from the given `Tensor`, copying its underlying `TensorImpl`.
+  /// `requires_grad` should be
   /// set only for leaves, and determines whether the `Variable` will accumulate
   /// gradients. NOTE: `data` must *not* be a `Variable` already. Its dynamic
   /// type *must* be `Tensor`.
@@ -113,6 +114,16 @@ struct TORCH_API Variable : public at::Tensor {
       bool requires_grad,
       bool allow_tensor_metadata_change);
 
+  /// Creates a `Variable` from the given `Tensor`, consuming its underlying `TensorImpl`.
+  /// This is intended to be used from functions that immediately create a `Tensor`,
+  /// convert it to a `Variable`, and then free it; it has been found to
+  /// decrease the overhead of those operations, in some situations.
+  /// The comments about `requires_grad` and `data` on the above version also apply to this one.
+  friend Variable make_variable_consuming(
+      at::Tensor data,
+      bool requires_grad,
+      bool allow_tensor_metadata_change);
+
   /// Creates a `Variable` from the given `Tensor` and specify a
   /// `gradient_edge`, i.e. a (function, input_nr) pair specifying the function
   /// in the autograd graph, and what particular input of that function, this
@@ -576,6 +587,22 @@ inline Variable make_variable(
   return Variable();
 }
 
+inline Variable make_variable_consuming(
+    at::Tensor data,
+    bool requires_grad = false,
+    bool allow_tensor_metadata_change = true) {
+  AT_CHECK(
+      !data.is_variable(),
+      "Must not create a new variable from a variable, use its .data()");
+  if (data.defined()) {
+    AT_ASSERT(data.getIntrusivePtr().use_count() == 1);
+    data.unsafeGetTensorImpl()->set_allow_tensor_metadata_change(allow_tensor_metadata_change);
+    auto autograd_meta = c10::guts::make_unique<Variable::AutogradMeta>();
+    return Variable(c10::make_intrusive<Variable::Impl>(std::move(data), std::move(autograd_meta), requires_grad));
+  }
+  return Variable();
+}
+
 inline Variable make_variable(
     at::Tensor data,
     Edge gradient_edge,
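
Note (illustration, not part of the patch): the win here comes from the fact that a factory function like `at::empty` holds the only reference to the `TensorImpl` it just created, so `make_variable_consuming` can assert sole ownership and move the handle into the `Variable` instead of paying for `shallow_copy_and_detach`. The following self-contained C++ sketch demonstrates the same copy-versus-consume pattern; `std::shared_ptr` stands in for `c10::intrusive_ptr<TensorImpl>`, and the `Impl`, `Wrapper`, and `wrap_*` names are hypothetical, chosen for this example only.

// Sketch of the optimization this patch makes (hypothetical names, not
// PyTorch code): when the caller owns the only reference to a freshly
// created object, moving that reference into the wrapper avoids the
// duplicate allocation a copying constructor would perform.
#include <cassert>
#include <iostream>
#include <memory>
#include <utility>

struct Impl {
  int value = 0;
};

struct Wrapper {
  std::shared_ptr<Impl> impl;
};

// Copying path, analogous to make_variable(): duplicates the underlying
// Impl (as shallow_copy_and_detach() duplicates the TensorImpl) and
// leaves the caller's handle alive.
Wrapper wrap_copying(const std::shared_ptr<Impl>& data) {
  return Wrapper{std::make_shared<Impl>(*data)};  // second allocation
}

// Consuming path, analogous to make_variable_consuming(): asserts sole
// ownership (mirroring the AT_ASSERT in the patch) and steals the handle,
// so no new Impl is allocated and no refcount is bumped.
Wrapper wrap_consuming(std::shared_ptr<Impl> data) {
  assert(data.use_count() == 1);
  return Wrapper{std::move(data)};
}

int main() {
  // Factory-style call site: the temporary is the only owner, so the
  // consuming overload can take it over without any copy.
  Wrapper w = wrap_consuming(std::make_shared<Impl>());
  std::cout << "use_count after consuming: " << w.impl.use_count() << '\n';  // prints 1
  return 0;
}

Because every generated factory in gen_variable_factories.py follows exactly this create-then-wrap shape, switching the codegen to the consuming overload removes the copy on every `torch.empty`-style call, which is where the overhead reduction cited in the summary would come from.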