.. autoattribute:: is_cuda
.. autoattribute:: device
+ .. autoattribute:: grad
.. automethod:: abs
.. automethod:: abs_
.. automethod:: apply_
.. automethod:: argmax
.. automethod:: argmin
+ .. automethod:: argsort
.. automethod:: asin
.. automethod:: asin_
.. automethod:: atan
.. automethod:: atan2
.. automethod:: atan2_
.. automethod:: atan_
+ .. automethod:: backward
.. automethod:: baddbmm
.. automethod:: baddbmm_
.. automethod:: bernoulli
.. automethod:: bernoulli_
+ .. automethod:: bincount
.. automethod:: bmm
.. automethod:: byte
.. automethod:: btrifact
.. automethod:: cumsum
.. automethod:: data_ptr
.. automethod:: det
+ .. automethod:: dense_dim
+ .. automethod:: detach
+ .. automethod:: detach_
.. automethod:: diag
.. automethod:: diag_embed
+ .. automethod:: diagflat
+ .. automethod:: diagonal
+ .. automethod:: digamma
+ .. automethod:: digamma_
.. automethod:: dim
.. automethod:: dist
.. automethod:: div
.. automethod:: expand
.. automethod:: expand_as
.. automethod:: exponential_
+ .. automethod:: fft
.. automethod:: fill_
.. automethod:: flatten
.. automethod:: flip
.. automethod:: gt
.. automethod:: gt_
.. automethod:: half
+ .. automethod:: hardshrink
.. automethod:: histc
+ .. automethod:: ifft
.. automethod:: index_add_
.. automethod:: index_add
.. automethod:: index_copy_
.. automethod:: index_put_
.. automethod:: index_put
.. automethod:: index_select
+ .. automethod:: indices
.. automethod:: int
.. automethod:: inverse
+ .. automethod:: irfft
.. automethod:: is_contiguous
.. automethod:: is_floating_point
+ .. automethod:: is_leaf
.. automethod:: is_pinned
.. automethod:: is_set_to
+ .. automethod:: is_shared
.. automethod:: is_signed
+ .. automethod:: is_sparse
.. automethod:: item
.. automethod:: kthvalue
.. automethod:: le
.. automethod:: mvlgamma
.. automethod:: mvlgamma_
.. automethod:: narrow
+ .. automethod:: narrow_copy
.. automethod:: ndimension
.. automethod:: ne
.. automethod:: ne_
.. automethod:: random_
.. automethod:: reciprocal
.. automethod:: reciprocal_
+ .. automethod:: register_hook
.. automethod:: remainder
.. automethod:: remainder_
.. automethod:: renorm
.. automethod:: renorm_
.. automethod:: repeat
+ .. automethod:: requires_grad
.. automethod:: requires_grad_
.. automethod:: reshape
.. automethod:: reshape_as
.. automethod:: resize_
.. automethod:: resize_as_
+ .. automethod:: retain_grad
+ .. automethod:: rfft
.. automethod:: roll
+ .. automethod:: rot90
.. automethod:: round
.. automethod:: round_
.. automethod:: rsqrt
.. automethod:: rsqrt_
- .. automethod:: scatter_
.. automethod:: scatter
+ .. automethod:: scatter_
.. automethod:: scatter_add_
.. automethod:: scatter_add
.. automethod:: select
.. automethod:: sort
.. automethod:: split
.. automethod:: sparse_mask
+ .. automethod:: sparse_dim
.. automethod:: sqrt
.. automethod:: sqrt_
.. automethod:: squeeze
.. automethod:: squeeze_
.. automethod:: std
+ .. automethod:: stft
.. automethod:: storage
.. automethod:: storage_offset
.. automethod:: storage_type
.. automethod:: sub
.. automethod:: sub_
.. automethod:: sum
+ .. automethod:: sum_to_size
.. automethod:: svd
.. automethod:: symeig
.. automethod:: t
.. automethod:: trunc_
.. automethod:: type
.. automethod:: type_as
+ .. automethod:: unbind
.. automethod:: unfold
.. automethod:: uniform_
.. automethod:: unique
.. automethod:: unsqueeze
.. automethod:: unsqueeze_
+ .. automethod:: values
.. automethod:: var
.. automethod:: view
.. automethod:: view_as
+ .. automethod:: where
.. automethod:: zero_
.. class:: ByteTensor()
import re
import ast
import _ast
+import textwrap
path = os.path.dirname(os.path.realpath(__file__))
rstpath = os.path.join(path, '../docs/source/')
pypath = os.path.join(path, '../torch/_torch_docs.py')
r1 = re.compile(r'\.\. autofunction:: (\w*)')
+r2 = re.compile(r'\.\. auto(?:method|attribute):: (\w*)')
class TestDocCoverage(unittest.TestCase):
- def test_torch(self):
- # get symbols documented in torch.rst
- whitelist = [
- 'set_printoptions', 'get_rng_state', 'is_storage', 'initial_seed',
- 'set_default_tensor_type', 'load', 'save', 'set_default_dtype',
- 'is_tensor', 'compiled_with_cxx11_abi', 'set_rng_state',
- 'manual_seed'
- ]
- everything = set()
- filename = os.path.join(rstpath, 'torch.rst')
+ @staticmethod
+ def parse_rst(filename, regex):
+ filename = os.path.join(rstpath, filename)
+ ret = set()
with open(filename, 'r') as f:
lines = f.readlines()
for l in lines:
l = l.strip()
- name = r1.findall(l)
+ name = regex.findall(l)
if name:
- everything.add(name[0])
- everything -= set(whitelist)
+ ret.add(name[0])
+ return ret
+
+ def test_torch(self):
+ # get symbols documented in torch.rst
+ in_rst = self.parse_rst('torch.rst', r1)
# get symbols in functional.py and _torch_docs.py
- whitelist2 = ['product', 'inf', 'math', 'reduce', 'warnings', 'torch', 'annotate']
- everything2 = set()
- with open(pypath, 'r') as f:
- body = ast.parse(f.read()).body
- for i in body:
- if not isinstance(i, _ast.Expr):
- continue
- i = i.value
- if not isinstance(i, _ast.Call):
- continue
- if i.func.id != 'add_docstr':
- continue
- i = i.args[0]
- if i.value.id != 'torch':
- continue
- i = i.attr
- everything2.add(i)
- for p in dir(torch.functional):
- if not p.startswith('_') and p[0].islower():
- everything2.add(p)
- everything2 -= set(whitelist2)
+ whitelist = {
+ # below are some jit functions
+ 'wait', 'fork', 'parse_type_comment', 'import_ir_module',
+ 'to_batch_graph', 'import_ir_module_from_buffer',
+ 'register_batch_operator', 'merge_type_from_type_comment',
+
+ # below are symbols mistakenly bound to torch.*, but should
+ # go to torch.nn.functional.* instead
+ 'avg_pool1d', 'conv_transpose2d', 'conv_transpose1d', 'conv3d',
+ 'relu_', 'pixel_shuffle', 'conv2d', 'selu_', 'celu_', 'threshold_',
+ 'cosine_similarity', 'rrelu_', 'conv_transpose3d', 'conv1d', 'pdist',
+ 'adaptive_avg_pool1d', 'conv_tbc'
+ }
+ has_docstring = set(
+ a for a in dir(torch)
+ if getattr(torch, a).__doc__ and not a.startswith('_') and
+ 'function' in type(getattr(torch, a)).__name__)
+ self.assertEqual(
+ has_docstring & whitelist, whitelist,
+ textwrap.dedent('''
+ The whitelist in test_docs_coverage.py contains something
+ that doesn't have a docstring or is not in torch.*. If you just
+ removed something from torch.*, please remove it from the whitelist
+ in test_docs_coverage.py'''))
+ has_docstring -= whitelist
# assert they are equal
- for p in everything:
- self.assertIn(p, everything2, 'in torch.rst but not in python')
- for p in everything2:
- self.assertIn(p, everything, 'in python but not in torch.rst')
+ self.assertEqual(
+ has_docstring, in_rst,
+ textwrap.dedent('''
+ The lists of functions documented in torch.rst and in python are different.
+ Did you forget to add the new thing to torch.rst, or to whitelist things
+ you don't want to document?''')
+ )
+
+ def test_tensor(self):
+ in_rst = self.parse_rst('tensors.rst', r2)
+ classes = [torch.FloatTensor, torch.LongTensor, torch.ByteTensor]
+ has_docstring = set(x for c in classes for x in dir(c) if not x.startswith('_') and getattr(c, x).__doc__)
+ self.assertEqual(
+ has_docstring, in_rst,
+ textwrap.dedent('''
+ The lists of tensor methods documented in tensors.rst and in python are
+ different. Did you forget to add the new thing to tensors.rst, or to
+ whitelist things you don't want to document?''')
+ )
if __name__ == '__main__':
accumulate (bool): whether to accumulate into self
""")
+add_docstr_all('index_put',
+ r"""
+index_put(indices, value, accumulate=False) -> Tensor
+
+Out-of-place version of :meth:`~Tensor.index_put_`
+""")
+
add_docstr_all('index_select',
r"""
index_select(dim, index) -> Tensor
Returns True if the data type of :attr:`self` is a floating point data type.
""")
+add_docstr_all('is_signed',
+ r"""
+is_signed() -> bool
+
+Returns True if the data type of :attr:`self` is a signed data type.
+""")
+
add_docstr_all('is_set_to',
r"""
is_set_to(tensor) -> bool
r"""
storage() -> torch.Storage
-Returns the underlying storage
+Returns the underlying storage.
""")
add_docstr_all('storage_offset',
""")
+add_docstr_all('storage_type',
+ r"""
+storage_type() -> type
+
+Returns the type of the underlying storage.
+""")
+
add_docstr_all('stride',
r"""
stride(dim) -> tuple or int
See :func:`torch.take`
""")
+add_docstr_all('tan',
+ r"""
+tan() -> Tensor
+
+See :func:`torch.tan`
+""")
+
add_docstr_all('tan_',
r"""
tan_() -> Tensor
See :func:`torch.unbind`
""")
+add_docstr_all('pin_memory',
+ r"""
+pin_memory() -> Tensor
+
+Copies the tensor to pinned memory, if it's not already pinned.
+""")
+
add_docstr_all('pinverse',
r"""
pinverse() -> Tensor