From 75f198d48d62823fa1f57d34df49a6fb1bade2bb Mon Sep 17 00:00:00 2001
From: Supriya Rao
Date: Fri, 13 Aug 2021 07:58:38 -0700
Subject: [PATCH] [docs][ao] update quantize_per_tensor to mention overloads (#63165)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63165

Add details about the overloads for
* list of tensors input
* supporting tensor scale/zero-point inputs

Test Plan: CI

Imported from OSS

Reviewed By: bdhirsh

Differential Revision: D30291045

fbshipit-source-id: 9fc6418792c5e3a35417eeb8d31de4a4bfcbb7a5
---
 torch/_torch_docs.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
index a26e8dc..876862d 100644
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -11166,14 +11166,14 @@ quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
 Converts a float tensor to a quantized tensor with given scale and zero point.
 
 Arguments:
-    input (Tensor): float tensor to quantize
-    scale (float): scale to apply in quantization formula
-    zero_point (int): offset in integer value that maps to float zero
+    input (Tensor): float tensor or list of tensors to quantize
+    scale (float or Tensor): scale to apply in quantization formula
+    zero_point (int or Tensor): offset in integer value that maps to float zero
     dtype (:class:`torch.dtype`): the desired data type of returned tensor.
         Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
 
 Returns:
-    Tensor: A newly quantized tensor
+    Tensor: A newly quantized tensor or list of quantized tensors.
 
 Example::
 
@@ -11182,6 +11182,15 @@ Example::
            quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
     >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
     tensor([ 0, 10, 20, 30], dtype=torch.uint8)
+    >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
+    ...                           torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
+    (tensor([-1., 0.], size=(2,), dtype=torch.quint8,
+        quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
+        tensor([-2., 2.], size=(2,), dtype=torch.quint8,
+        quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
+    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
+    tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
+        quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
 """)
 
 add_docstr(torch.quantize_per_channel,
-- 
2.7.4
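
A minimal usage sketch of the overloads documented above, assuming a PyTorch build that accepts
tensor scale/zero_point and list-of-tensors inputs (printed reprs may differ by release):

    import torch

    # Scalar scale/zero_point overload: quantize one float tensor to quint8.
    x = torch.tensor([-1.0, 0.0, 1.0, 2.0])
    q = torch.quantize_per_tensor(x, scale=0.1, zero_point=10, dtype=torch.quint8)
    print(q.int_repr())  # integer representation, e.g. tensor([ 0, 10, 20, 30], dtype=torch.uint8)

    # Tensor scale/zero_point overload: same call with 0-dim tensors.
    q2 = torch.quantize_per_tensor(x, torch.tensor(0.1), torch.tensor(10), torch.quint8)

    # List-of-tensors overload: the i-th input tensor is quantized with the
    # i-th scale/zero_point entry; the result holds one quantized tensor per input.
    qs = torch.quantize_per_tensor(
        [torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
        torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
    print([t.dequantize() for t in qs])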