From: Supriya Rao
Date: Mon, 16 Aug 2021 05:44:44 +0000 (-0700)
Subject: [docs][ao] Add missing docstrings for quantized_max_pool1d and quantized_max_pool2d...
X-Git-Tag: accepted/tizen/8.0/unified/20231005.095509~1005
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0831b59cf5fcc9dcefa16d1562b9770b0ba32316;p=platform%2Fupstream%2Fpytorch.git

[docs][ao] Add missing docstrings for quantized_max_pool1d and quantized_max_pool2d (#63242)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63242

These functions are part of the native functions namespace as well as the quantized namespace

Test Plan:
CI

Imported from OSS

Reviewed By: jerryzh168

Differential Revision: D30316430

fbshipit-source-id: cd9c839e5c1a961e3c6944e514c16fbc256a2f0c
---

diff --git a/docs/source/torch.rst b/docs/source/torch.rst
index febd119..88cbc69 100644
--- a/docs/source/torch.rst
+++ b/docs/source/torch.rst
@@ -353,6 +353,8 @@ Pointwise Ops
     positive
     pow
     quantized_batch_norm
+    quantized_max_pool1d
+    quantized_max_pool2d
     rad2deg
     real
     reciprocal
diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
index 35e1dce..6ab55aa 100644
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -11272,6 +11272,70 @@ Example::
 
 """)
 
+add_docstr(torch.quantized_max_pool1d,
+           r"""
+quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
+
+Applies a 1D max pooling over an input quantized tensor composed of several input planes.
+
+Arguments:
+    input (Tensor): quantized tensor
+    kernel_size (``list of int``): the size of the sliding window
+    stride (``list of int``, optional): the stride of the sliding window
+    padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
+    dilation (``list of int``, optional): the stride between elements within a sliding window, must be > 0. Default 1
+    ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
+        Defaults to False.
+
+
+Returns:
+    Tensor: A quantized tensor with max_pool1d applied.
+
+Example::
+
+    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
+    >>> torch.quantized_max_pool1d(qx, [2])
+    tensor([[0.0000],
+            [1.5000]], size=(2, 1), dtype=torch.quint8,
+        quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
+""")
+
+
+add_docstr(torch.quantized_max_pool2d,
+           r"""
+quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
+
+Applies a 2D max pooling over an input quantized tensor composed of several input planes.
+
+Arguments:
+    input (Tensor): quantized tensor
+    kernel_size (``list of int``): the size of the sliding window
+    stride (``list of int``, optional): the stride of the sliding window
+    padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
+    dilation (``list of int``, optional): the stride between elements within a sliding window, must be > 0. Default 1
+    ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape.
+        Defaults to False.
+
+
+Returns:
+    Tensor: A quantized tensor with max_pool2d applied.
+
+Example::
+
+    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
+    >>> torch.quantized_max_pool2d(qx, [2,2])
+    tensor([[[[1.5000]],
+
+             [[1.5000]]],
+
+
+            [[[0.0000]],
+
+             [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
+        quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
+""")
+
+
 add_docstr(torch.Generator,
            r"""
 Generator(device='cpu') -> Generator
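A minimal usage sketch of the two documented ops with non-default pooling parameters; the shapes and parameter values here are illustrative rather than taken from the patch, and it assumes a PyTorch build with the quantized ops available::

    >>> import torch
    >>> # quantize a float tensor to quint8; (N, C, L) layout for the 1D op
    >>> qx = torch.quantize_per_tensor(torch.rand(1, 2, 8), scale=1.5, zero_point=3, dtype=torch.quint8)
    >>> # stride defaults to kernel_size when omitted; padding must satisfy
    >>> # 0 <= padding <= kernel_size / 2, per the Arguments sections above
    >>> torch.quantized_max_pool1d(qx, kernel_size=[3], stride=[2], padding=[1]).shape
    torch.Size([1, 2, 4])
    >>> # (N, C, H, W) layout for the 2D op
    >>> qx2 = torch.quantize_per_tensor(torch.rand(1, 2, 8, 8), 1.5, 3, torch.quint8)
    >>> torch.quantized_max_pool2d(qx2, kernel_size=[2, 2], stride=[2, 2]).shape
    torch.Size([1, 2, 4, 4])

The output lengths follow the usual pooling formula, floor((L + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1, so the 1D call above maps L=8 to 4 and the 2D call maps 8x8 to 4x4.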