From: Will Feng
Date: Sat, 23 Mar 2019 19:47:15 +0000 (-0700)
Subject: Move pyobj_ to TensorImpl (#18225)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~661
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=32d0e7e339d22636ccb43d5d8a85b70791dd2dba;p=platform%2Fupstream%2Fpytorch.git

Move pyobj_ to TensorImpl (#18225)

Summary:
Currently, `THPVariable_Wrap(…)` and `THPVariable_NewWithVar(…)` depend on the existence of `pyobj_` in the autograd metadata of a Variable to convert the Variable to a Python tensor. However, after the Variable/Tensor merge there will be Variables that don't carry autograd metadata, and to allow converting a non-autograd-meta Variable to a Python tensor we need to store `pyobj_` outside of the autograd metadata, in a place where it is always available. This PR makes that possible by moving `pyobj_` into TensorImpl, so that `THPVariable_Wrap(…)` and `THPVariable_NewWithVar(…)` can always access a Variable's `pyobj_` and convert the Variable to a Python tensor. (A minimal sketch of the resulting wrap-and-cache pattern follows the diff below.)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/18225

Differential Revision: D14562616

Pulled By: yf225

fbshipit-source-id: 18d4aaace70eee6120abaf9276036d1f8f51b18d
---

diff --git a/c10/core/TensorImpl.h b/c10/core/TensorImpl.h
index a1f08d7..6a8f408 100644
--- a/c10/core/TensorImpl.h
+++ b/c10/core/TensorImpl.h
@@ -15,6 +15,7 @@
 #include 
 #include 
 #include 
+#include <c10/util/python_stub.h>
 
 // A global boolean variable to control whether we free memory when a Tensor
 // is shrinked to a smaller size. As a result, a Tensor is always going to
@@ -860,6 +861,14 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target {
     return impl;
   }
 
+  inline void set_pyobj(PyObject* pyobj) noexcept {
+    pyobj_ = pyobj;
+  }
+
+  inline PyObject* pyobj() const noexcept {
+    return pyobj_;
+  }
+
  private:
   // As an optimization, get_device handles the typical CUDA Tensor case and
   // calls get_device_slow if the tensor stores its device somewhere else
@@ -1368,6 +1377,8 @@ protected:
   // at a time).
   std::unique_ptr<c10::AutogradMetaInterface> autograd_meta_ = nullptr;
 
+  PyObject* pyobj_ = nullptr; // weak reference
+
   // We could save a word or two by combining the SmallVector structs,
   // since their size is redundant, and if we need to overflow the buffer space
   // we could keep the two pointers together. However, that would require
@@ -1461,10 +1472,11 @@ protected:
 //    numel
 //    data type pointer
 //    autograd metadata pointer
+//    PyObject pointer
 //    miscellaneous bitfield
 //
 static_assert(sizeof(void*) != sizeof(int64_t) ||  // if 64-bit...
-              sizeof(TensorImpl) == sizeof(int64_t) * 25,
+              sizeof(TensorImpl) == sizeof(int64_t) * 26,
               "You changed the size of TensorImpl on 64-bit arch."
               "See Note [TensorImpl size constraints] on how to proceed.");

diff --git a/c10/util/python_stub.h b/c10/util/python_stub.h
new file mode 100644
index 0000000..336c530
--- /dev/null
+++ b/c10/util/python_stub.h
@@ -0,0 +1,4 @@
+#pragma once
+
+struct _object;
+using PyObject = _object;

diff --git a/torch/csrc/autograd/variable.cpp b/torch/csrc/autograd/variable.cpp
index d6bac0d..33b9537 100644
--- a/torch/csrc/autograd/variable.cpp
+++ b/torch/csrc/autograd/variable.cpp
@@ -28,7 +28,6 @@ Variable::Impl::Impl(at::Tensor data, std::unique_ptr<Variable::AutogradMeta> au
   autograd_meta->requires_grad_ = false;
   autograd_meta->is_view_ = false;
   autograd_meta->output_nr_ = gradient_edge.input_nr;
-  autograd_meta->pyobj_ = nullptr;
 
   // set_requires_grad also checks error conditions.
   autograd_meta->set_requires_grad(requires_grad, this);

diff --git a/torch/csrc/autograd/variable.h b/torch/csrc/autograd/variable.h
index c90e61a..6640819 100644
--- a/torch/csrc/autograd/variable.h
+++ b/torch/csrc/autograd/variable.h
@@ -348,7 +348,6 @@ struct TORCH_API Variable::AutogradMeta : public c10::AutogradMetaInterface {
   // We use this to make sure we can setup the backwards trace
   // correctly when this variable is passed to another function.
   uint32_t output_nr_;
-  PyObject* pyobj_ = nullptr; // weak reference
 
   // Mutex to ensure that concurrent read operations that modify internal
   // state are still thread-safe. Used by grad_fn() and
@@ -756,11 +755,11 @@ inline const std::string& Variable::name() const noexcept {
 }
 
 inline void Variable::set_pyobj(PyObject* pyobj) noexcept {
-  get_autograd_meta()->pyobj_ = pyobj;
+  get()->set_pyobj(pyobj);
 }
 
 inline PyObject* Variable::pyobj() const noexcept {
-  return get_autograd_meta()->pyobj_;
+  return get()->pyobj();
 }
 
 inline Variable::AutogradMeta* Variable::get_autograd_meta() const noexcept {
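
The commit above is about letting `THPVariable_Wrap(…)` find a Variable's `pyobj_` even when the Variable carries no autograd metadata. As an illustration of the kind of wrap-and-cache pattern such a pointer enables, here is a minimal, self-contained C++ sketch that assumes only what the diff shows (a `pyobj()`/`set_pyobj()` pair on the impl). `TensorImplStub`, `allocate_python_tensor`, and `wrap` are hypothetical stand-ins, not PyTorch's actual implementation, and the concrete `_object` body exists only so the demo compiles on its own; the real `PyObject` stays an opaque forward declaration, as in c10/util/python_stub.h.

#include <cassert>
#include <cstdio>

// Fake Python object for this standalone demo only. In the real code,
// _object is CPython's PyObject and is merely forward-declared
// (see c10/util/python_stub.h in the diff above).
struct _object {
  void* payload;
};
using PyObject = _object;

// Stand-in for the slice of TensorImpl this commit adds: a weak
// PyObject pointer plus its accessors.
struct TensorImplStub {
  void set_pyobj(PyObject* pyobj) noexcept { pyobj_ = pyobj; }
  PyObject* pyobj() const noexcept { return pyobj_; }
 private:
  PyObject* pyobj_ = nullptr;  // weak reference; owned by the Python side
};

// Hypothetical allocator standing in for THPVariable_NewWithVar.
PyObject* allocate_python_tensor(TensorImplStub& impl) {
  return new PyObject{&impl};
}

// Sketch of the wrap logic: return the cached Python object if one
// exists, otherwise allocate one and cache it on the impl.
PyObject* wrap(TensorImplStub& impl) {
  if (PyObject* obj = impl.pyobj()) {
    return obj;  // cache hit: same impl, same Python object
  }
  PyObject* obj = allocate_python_tensor(impl);
  impl.set_pyobj(obj);  // cache for subsequent wraps
  return obj;
}

int main() {
  TensorImplStub impl;
  PyObject* a = wrap(impl);
  PyObject* b = wrap(impl);
  assert(a == b);  // wrapping the same impl twice yields one Python object
  std::puts("second wrap() returned the cached PyObject");
  delete a;
  return 0;
}

Because the cached pointer now lives on TensorImpl rather than on AutogradMeta, a lookup like the one sketched above works even for Variables without autograd metadata, which is exactly the situation the Variable/Tensor merge introduces.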