From: Supriya Rao
Date: Mon, 16 Aug 2021 05:44:44 +0000 (-0700)
Subject: [docs][ao] Add missing documentation for torch.quantized_batch_norm (#63240)
X-Git-Tag: accepted/tizen/8.0/unified/20231005.095509~1006
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a090073fe438c44c9fabf82958638f1775e897d3;p=platform%2Fupstream%2Fpytorch.git

[docs][ao] Add missing documentation for torch.quantized_batch_norm (#63240)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63240

The op is exposed to the end user as torch.quantized_batch_norm but had no existing documentation.

Test Plan: CI

Imported from OSS

Reviewed By: VitalyFedyunin

Differential Revision: D30316431

fbshipit-source-id: bf2dc8b7b6f497cf73528eaa2bedef9f65029d84
---

diff --git a/docs/source/torch.rst b/docs/source/torch.rst
index 6fa2ef9..febd119 100644
--- a/docs/source/torch.rst
+++ b/docs/source/torch.rst
@@ -352,6 +352,7 @@ Pointwise Ops
     polygamma
     positive
     pow
+    quantized_batch_norm
     rad2deg
     real
     reciprocal
diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
index d23b1d1..35e1dce 100644
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -11228,6 +11228,50 @@ Example::
             [100, 200]], dtype=torch.uint8)
 """)
 
+
+add_docstr(torch.quantized_batch_norm,
+           r"""
+quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor
+
+Applies batch normalization on a 4D (NCHW) quantized tensor.
+
+.. math::
+
+    y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
+
+Arguments:
+    input (Tensor): quantized tensor of shape (N, C, H, W)
+    weight (Tensor): float tensor that corresponds to gamma, of size C
+    bias (Tensor): float tensor that corresponds to beta, of size C
+    mean (Tensor): float tensor of per-channel means used for normalization, of size C
+    var (Tensor): float tensor of per-channel variances used for normalization, of size C
+    eps (float): a value added to the denominator for numerical stability
+    output_scale (float): scale of the output quantized tensor
+    output_zero_point (int): zero_point of the output quantized tensor
+
+Returns:
+    Tensor: A quantized tensor with batch normalization applied.
+
+Example::
+
+    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
+    >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
+    tensor([[[[-0.2000, -0.2000],
+              [ 1.6000, -0.2000]],
+
+             [[-0.4000, -0.4000],
+              [-0.4000,  0.6000]]],
+
+
+            [[[-0.2000, -0.2000],
+              [-0.2000, -0.2000]],
+
+             [[ 0.6000, -0.4000],
+              [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
+           quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
+""")
+
+
 add_docstr(torch.Generator,
            r"""
 Generator(device='cpu') -> Generator
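
For readers checking the semantics of the documented op: the relation to the float batch norm can be verified by dequantizing, applying inference-mode batch norm with the same per-channel statistics, and requantizing with the caller-supplied output_scale and output_zero_point. Below is a minimal sketch, not part of the patch; it assumes a PyTorch build with the quantized CPU ops available, and the one-quantization-step tolerance is an assumption to absorb rounding differences between the two paths.

    import torch
    import torch.nn.functional as F

    # Inputs mirroring the docstring example: a random NCHW tensor quantized to quint8.
    qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), scale=1.5, zero_point=3,
                                   dtype=torch.quint8)
    weight = torch.ones(2)   # gamma, one value per channel
    bias = torch.zeros(2)    # beta, one value per channel
    mean = torch.rand(2)     # per-channel mean used for normalization
    var = torch.rand(2)      # per-channel variance used for normalization
    eps = 1e-5
    output_scale, output_zero_point = 0.2, 2

    # Path 1: batch norm computed directly on the quantized tensor.
    qy = torch.quantized_batch_norm(qx, weight, bias, mean, var, eps,
                                    output_scale, output_zero_point)

    # Path 2: float reference -- dequantize, run inference-mode batch norm with
    # the same statistics, then requantize with the same output parameters.
    y_ref = F.batch_norm(qx.dequantize(), mean, var, weight=weight, bias=bias,
                         training=False, eps=eps)
    qy_ref = torch.quantize_per_tensor(y_ref, output_scale, output_zero_point,
                                       torch.quint8)

    # The two paths should agree up to quantization rounding (assumed bound:
    # one output scale step per element).
    assert torch.allclose(qy.dequantize(), qy_ref.dequantize(), atol=output_scale)

This dequantize-compute-requantize reading is the natural reference semantics implied by the docstring's output_scale and output_zero_point arguments, which fix the quantization parameters of the result rather than reusing the input's.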