""")
-@torch._jit_internal.weak_script
+@weak_script
def fractional_max_pool2d_with_indices(input, kernel_size, output_size=None,
output_ratio=None, return_indices=False,
_random_samples=None):
return torch._C._nn.fractional_max_pool2d(input, kernel_size, _output_size, _random_samples)
-@torch._jit_internal.weak_script
+@weak_script
def _fractional_max_pool2d(input, kernel_size, output_size=None,
output_ratio=None, return_indices=False,
_random_samples=None):
if_false=_fractional_max_pool2d)
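# A minimal usage sketch for the dispatch above (shapes illustrative): with the
# default return_indices=False the call resolves to _fractional_max_pool2d and
# yields a Tensor; with return_indices=True it resolves to
# fractional_max_pool2d_with_indices and yields a (values, indices) pair.
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)
out = F.fractional_max_pool2d(x, kernel_size=2, output_ratio=0.5)  # (1, 3, 16, 16)
out, idx = F.fractional_max_pool2d(x, kernel_size=2, output_ratio=0.5,
                                   return_indices=True)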
-@torch._jit_internal.weak_script
+@weak_script
def max_pool1d_with_indices(input, kernel_size, stride=None, padding=0,
dilation=1, ceil_mode=False, return_indices=False):
# type: (Tensor, BroadcastingList1[int], Optional[BroadcastingList1[int]], BroadcastingList1[int], BroadcastingList1[int], bool, bool) -> Tuple[Tensor, Tensor] # noqa
input, kernel_size, _stride, padding, dilation, ceil_mode)
-@torch._jit_internal.weak_script
+@weak_script
def _max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
# type: (Tensor, BroadcastingList1[int], Optional[BroadcastingList1[int]], BroadcastingList1[int], BroadcastingList1[int], bool, bool) -> Tensor # noqa
if_false=_max_pool1d)
-@torch._jit_internal.weak_script
+@weak_script
def max_pool2d_with_indices(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
# type: (Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], BroadcastingList2[int], BroadcastingList2[int], bool, bool) -> Tuple[Tensor, Tensor] # noqa
return torch._C._nn.max_pool2d_with_indices(input, kernel_size, _stride, padding, dilation, ceil_mode)
-@torch._jit_internal.weak_script
+@weak_script
def _max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
# type: (Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], BroadcastingList2[int], BroadcastingList2[int], bool, bool) -> Tensor # noqa
if_false=_max_pool2d)
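# The same boolean_dispatch pattern, exercised for max_pool2d (assuming a 4-D
# NCHW input):
import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4)
y = F.max_pool2d(x, kernel_size=2, stride=2)       # (1, 1, 2, 2)
y, indices = F.max_pool2d(x, kernel_size=2, stride=2,
                          return_indices=True)     # indices feed max_unpool2d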
-@torch._jit_internal.weak_script
+@weak_script
def max_pool3d_with_indices(input, kernel_size, stride=None, padding=0,
dilation=1, ceil_mode=False, return_indices=False):
# type: (Tensor, BroadcastingList3[int], Optional[BroadcastingList3[int]], BroadcastingList3[int], BroadcastingList3[int], bool, bool) -> Tuple[Tensor, Tensor] # noqa
input, kernel_size, _stride, padding, dilation, ceil_mode)
-@torch._jit_internal.weak_script
+@weak_script
def _max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False):
# type: (Tensor, BroadcastingList3[int], Optional[BroadcastingList3[int]], BroadcastingList3[int], BroadcastingList3[int], bool, bool) -> Tensor # noqa
if_false=_max_pool3d)
-@torch._jit_internal.weak_script
+@weak_script
def _unpool_output_size(input, kernel_size, stride, padding, output_size):
# type: (Tensor, List[int], List[int], List[int], Optional[List[int]]) -> List[int]
input_size = input.size()
return ret
-@torch._jit_internal.weak_script
+@weak_script
def max_unpool1d(input, indices, kernel_size, stride=None, padding=0,
output_size=None):
# type: (Tensor, Tensor, BroadcastingList1[int], Optional[BroadcastingList1[int]], BroadcastingList1[int], Optional[BroadcastingList1[int]]) -> Tensor # noqa
return torch._C._nn.max_unpool2d(input.unsqueeze(3), indices.unsqueeze(3), output_size + [1]).squeeze(3)
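# max_unpool1d above reuses the 2-D kernel by appending a singleton dimension.
# A round-trip sketch; positions that were not the window max come back as zero:
import torch
import torch.nn.functional as F

x = torch.tensor([[[1., 2., 3., 4.]]])
y, idx = F.max_pool1d(x, kernel_size=2, return_indices=True)  # y = [[[2., 4.]]]
z = F.max_unpool1d(y, idx, kernel_size=2)                     # [[[0., 2., 0., 4.]]]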
-@torch._jit_internal.weak_script
+@weak_script
def max_unpool2d(input, indices, kernel_size, stride=None, padding=0,
output_size=None):
# type: (Tensor, Tensor, BroadcastingList2[int], Optional[BroadcastingList2[int]], BroadcastingList2[int], Optional[BroadcastingList2[int]]) -> Tensor # noqa
return torch._C._nn.max_unpool2d(input, indices, output_size)
-@torch._jit_internal.weak_script
+@weak_script
def max_unpool3d(input, indices, kernel_size, stride=None, padding=0,
output_size=None):
# type: (Tensor, Tensor, BroadcastingList3[int], Optional[BroadcastingList3[int]], BroadcastingList3[int], Optional[BroadcastingList3[int]]) -> Tensor # noqa
input, indices, output_size, _stride, padding)
-@torch._jit_internal.weak_script
+@weak_script
def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
# type: (Tensor, float, int, Optional[BroadcastingList2[int]], bool) -> Tensor
r"""Applies a 2D power-average pooling over an input signal composed of
return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1. / norm_type)
-@torch._jit_internal.weak_script
+@weak_script
def lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
# type: (Tensor, float, int, Optional[BroadcastingList1[int]], bool) -> Tensor
r"""Applies a 1D power-average pooling over an input signal composed of
return (torch.sign(out) * relu(torch.abs(out))).mul(kernel_size).pow(1. / norm_type)
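# The identity behind both lp_pool variants: avg_pool of input**p times the
# window size recovers the sum of p-th powers, so the result is
# (sum x**p) ** (1/p). A quick numeric check with hand-picked values:
import torch
import torch.nn.functional as F

x = torch.tensor([[[3., 4., 6., 8.]]])
y = F.lp_pool1d(x, norm_type=2., kernel_size=2)
# (3**2 + 4**2) ** 0.5 = 5 and (6**2 + 8**2) ** 0.5 = 10
assert torch.allclose(y, torch.tensor([[[5., 10.]]]))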
-@torch._jit_internal.weak_script
+@weak_script
def adaptive_max_pool1d_with_indices(input, output_size, return_indices=False):
# type: (Tensor, BroadcastingList1[int], bool) -> Tuple[Tensor, Tensor]
r"""Applies a 1D adaptive max pooling over an input signal composed of
return torch.adaptive_max_pool1d(input, output_size)
-@torch._jit_internal.weak_script
+@weak_script
def _adaptive_max_pool1d(input, output_size, return_indices=False):
# type: (Tensor, BroadcastingList1[int], bool) -> Tensor
return adaptive_max_pool1d_with_indices(input, output_size)[0]
if_false=_adaptive_max_pool1d)
-@torch._jit_internal.weak_script
+@weak_script
def adaptive_max_pool2d_with_indices(input, output_size, return_indices=False):
# type: (Tensor, BroadcastingList2[int], bool) -> Tuple[Tensor, Tensor]
r"""Applies a 2D adaptive max pooling over an input signal composed of
return torch._C._nn.adaptive_max_pool2d(input, output_size)
-@torch._jit_internal.weak_script
+@weak_script
def _adaptive_max_pool2d(input, output_size, return_indices=False):
# type: (Tensor, BroadcastingList2[int], bool) -> Tensor
return adaptive_max_pool2d_with_indices(input, output_size)[0]
if_false=_adaptive_max_pool2d)
-@torch._jit_internal.weak_script
+@weak_script
def adaptive_max_pool3d_with_indices(input, output_size, return_indices=False):
# type: (Tensor, BroadcastingList3[int], bool) -> Tuple[Tensor, Tensor]
r"""Applies a 3D adaptive max pooling over an input signal composed of
return torch._C._nn.adaptive_max_pool3d(input, output_size)
-@torch._jit_internal.weak_script
+@weak_script
def _adaptive_max_pool3d(input, output_size, return_indices=False):
# type: (Tensor, BroadcastingList3[int], bool) -> Tensor
return adaptive_max_pool3d_with_indices(input, output_size)[0]
""")
-@torch._jit_internal.weak_script
+@weak_script
def adaptive_avg_pool2d(input, output_size):
# type: (Tensor, BroadcastingList2[int]) -> Tensor
r"""
return torch._C._nn.adaptive_avg_pool2d(input, _output_size)
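# adaptive_avg_pool2d derives kernel and stride from the requested output size;
# a common use is global average pooling with output_size=1. Shapes assumed:
import torch
import torch.nn.functional as F

x = torch.randn(8, 256, 13, 13)
g = F.adaptive_avg_pool2d(x, 1)                    # (8, 256, 1, 1)
assert torch.allclose(g.flatten(1), x.mean(dim=(2, 3)), atol=1e-6)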
-@torch._jit_internal.weak_script
+@weak_script
def adaptive_avg_pool3d(input, output_size):
# type: (Tensor, BroadcastingList3[int]) -> Tensor
r"""
# Activation functions
-@torch._jit_internal.weak_script
+@weak_script
def dropout(input, p=0.5, training=True, inplace=False):
# type: (Tensor, float, bool, bool) -> Tensor
r"""
else _VF.dropout(input, p, training))
-@torch._jit_internal.weak_script
+@weak_script
def alpha_dropout(input, p=0.5, training=False, inplace=False):
# type: (Tensor, float, bool, bool) -> Tensor
r"""Applies alpha dropout to the input.
else _VF.alpha_dropout(input, p, training))
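# The functional dropout family takes an explicit training flag (the module
# wrappers pass self.training). With training=False the call is a no-op:
import torch
import torch.nn.functional as F

x = torch.randn(4, 10)
assert torch.equal(F.dropout(x, p=0.5, training=False), x)
y = F.dropout(x, p=0.5, training=True)  # survivors scaled by 1 / (1 - p)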
-@torch._jit_internal.weak_script
+@weak_script
def dropout2d(input, p=0.5, training=True, inplace=False):
# type: (Tensor, float, bool, bool) -> Tensor
r"""
else _VF.feature_dropout(input, p, training))
-@torch._jit_internal.weak_script
+@weak_script
def dropout3d(input, p=0.5, training=True, inplace=False):
# type: (Tensor, float, bool, bool) -> Tensor
r"""
else _VF.feature_dropout(input, p, training))
-@torch._jit_internal.weak_script
+@weak_script
def feature_alpha_dropout(input, p=0.5, training=False, inplace=False):
# type: (Tensor, float, bool, bool) -> Tensor
if p < 0. or p > 1.:
else _VF.feature_alpha_dropout(input, p, training))
-@torch._jit_internal.weak_script
+@weak_script
def threshold(input, threshold, value, inplace=False):
# type: (Tensor, float, float, bool) -> Tensor
r"""Thresholds each element of the input Tensor.
""")
-@torch._jit_internal.weak_script
+@weak_script
def relu(input, inplace=False):
# type: (Tensor, bool) -> Tensor
r"""relu(input, inplace=False) -> Tensor
""")
-@torch._jit_internal.weak_script
+@weak_script
def glu(input, dim=-1):
# type: (Tensor, int) -> Tensor
r"""
return torch._C._nn.glu(input, dim)
-@torch._jit_internal.weak_script
+@weak_script
def hardtanh(input, min_val=-1., max_val=1., inplace=False):
# type: (Tensor, float, float, bool) -> Tensor
r"""
""")
-@torch._jit_internal.weak_script
+@weak_script
def relu6(input, inplace=False):
# type: (Tensor, bool) -> Tensor
r"""relu6(input, inplace=False) -> Tensor
return hardtanh(input, 0., 6., inplace)
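# relu6 is literally hardtanh clamped to [0, 6], per the return above; a quick
# equivalence check:
import torch
import torch.nn.functional as F

x = torch.linspace(-3., 9., steps=5)
assert torch.equal(F.relu6(x), F.hardtanh(x, 0., 6.))
assert torch.equal(F.relu6(x), x.clamp(min=0., max=6.))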
-@torch._jit_internal.weak_script
+@weak_script
def elu(input, alpha=1., inplace=False):
# type: (Tensor, float, bool) -> Tensor
r"""Applies element-wise,
""")
-@torch._jit_internal.weak_script
+@weak_script
def selu(input, inplace=False):
# type: (Tensor, bool) -> Tensor
r"""selu(input, inplace=False) -> Tensor
""")
-@torch._jit_internal.weak_script
+@weak_script
def celu(input, alpha=1., inplace=False):
# type: (Tensor, float, bool) -> Tensor
r"""celu(input, alpha=1., inplace=False) -> Tensor
""")
-@torch._jit_internal.weak_script
+@weak_script
def leaky_relu(input, negative_slope=0.01, inplace=False):
# type: (Tensor, float, bool) -> Tensor
r"""
""")
-@torch._jit_internal.weak_script
+@weak_script
def prelu(input, weight):
# type: (Tensor, Tensor) -> Tensor
r"""prelu(input, weight) -> Tensor
return torch.prelu(input, weight)
-@torch._jit_internal.weak_script
+@weak_script
def rrelu(input, lower=1. / 8, upper=1. / 3, training=False, inplace=False):
# type: (Tensor, float, float, bool, bool) -> Tensor
r"""rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) -> Tensor
""")
-@torch._jit_internal.weak_script
+@weak_script
def hardshrink(input, lambd=0.5):
# type: (Tensor, float) -> Tensor
r"""
return torch.hardshrink(input, lambd)
-@torch._jit_internal.weak_script
+@weak_script
def tanhshrink(input):
r"""tanhshrink(input) -> Tensor
return input - input.tanh()
-@torch._jit_internal.weak_script
+@weak_script
def softsign(input):
r"""softsign(input) -> Tensor
""")
-@torch._jit_internal.weak_script
+@weak_script
def _get_softmax_dim(name, ndim, stacklevel):
# type: (str, int, int) -> int
warnings.warn("Implicit dimension choice for {} has been deprecated. "
return ret
-@torch._jit_internal.weak_script
+@weak_script
def softmin(input, dim=None, _stacklevel=3, dtype=None):
# type: (Tensor, Optional[int], int, Optional[int]) -> Tensor
r"""Applies a softmin function.
return ret
-@torch._jit_internal.weak_script
+@weak_script
def softmax(input, dim=None, _stacklevel=3, dtype=None):
# type: (Tensor, Optional[int], int, Optional[int]) -> Tensor
r"""Applies a softmax function.
return ret
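# softmin is softmax of the negated input, and log_softmax is the numerically
# stabler form of softmax().log(). Both checks pass with an explicit dim
# (omitting dim triggers the deprecation warning from _get_softmax_dim):
import torch
import torch.nn.functional as F

x = torch.randn(2, 5)
assert torch.allclose(F.softmin(x, dim=1), F.softmax(-x, dim=1))
assert torch.allclose(F.log_softmax(x, dim=1), F.softmax(x, dim=1).log())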
-@torch._jit_internal.weak_script
+@weak_script
def _sample_gumbel(shape, eps=1e-10, out=None):
# type: (List[int], float, Optional[Tensor]) -> Tensor
"""
return - torch.log(eps - torch.log(U + eps))
-@torch._jit_internal.weak_script
+@weak_script
def _gumbel_softmax_sample(logits, tau=1, eps=1e-10):
# type: (Tensor, float, float) -> Tensor
"""
return softmax(y / tau, dims - 1)
-@torch._jit_internal.weak_script
+@weak_script
def gumbel_softmax(logits, tau=1., hard=False, eps=1e-10):
# type: (Tensor, float, bool, float) -> Tensor
r"""
return y
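# gumbel_softmax draws soft one-hot samples from the categorical distribution
# implied by logits; with hard=True the forward value is discretized while the
# gradient flows through the soft sample (straight-through). Logits assumed 2-D:
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10, requires_grad=True)
soft = F.gumbel_softmax(logits, tau=0.5)             # rows sum to 1
hard = F.gumbel_softmax(logits, tau=0.5, hard=True)  # exact one-hot rows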
-@torch._jit_internal.weak_script
+@weak_script
def log_softmax(input, dim=None, _stacklevel=3, dtype=None):
# type: (Tensor, Optional[int], int, Optional[int]) -> Tensor
r"""Applies a softmax followed by a logarithm.
""")
-@torch._jit_internal.weak_script
+@weak_script
def tanh(input):
r"""tanh(input) -> Tensor
return input.tanh()
-@torch._jit_internal.weak_script
+@weak_script
def sigmoid(input):
r"""sigmoid(input) -> Tensor
return input.sigmoid()
-@torch._jit_internal.weak_script
+@weak_script
def linear(input, weight, bias=None):
# type: (Tensor, Tensor, Optional[Tensor]) -> Tensor
r"""
return ret
-@torch._jit_internal.weak_script
+@weak_script
def bilinear(input1, input2, weight, bias=None):
# type: (Tensor, Tensor, Tensor, Optional[Tensor]) -> Tensor
return torch.bilinear(input1, input2, weight, bias)
return torch.embedding_renorm_(weight, input, max_norm, norm_type)
-@torch._jit_internal.weak_script
+@weak_script
def embedding(input, weight, padding_idx=None, max_norm=None, norm_type=2.,
scale_grad_by_freq=False, sparse=False):
# type: (Tensor, Tensor, Optional[int], Optional[float], float, bool, bool) -> Tensor
return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
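# embedding is a row lookup into weight; note the argument order flips between
# the Python signature (input, weight) and the ATen call above (weight, input):
import torch
import torch.nn.functional as F

weight = torch.randn(10, 3)             # 10-entry table of 3-dim vectors
input = torch.tensor([[1, 2], [4, 9]])
out = F.embedding(input, weight)        # (2, 2, 3)
assert torch.equal(out[0, 0], weight[1])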
-@torch._jit_internal.weak_script
+@weak_script
def embedding_bag(input, weight, offsets=None, max_norm=None, norm_type=2,
scale_grad_by_freq=False, mode='mean', sparse=False):
# type: (Tensor, Tensor, Optional[Tensor], Optional[float], float, bool, str, bool) -> Tensor
return ret
-@torch._jit_internal.weak_script
+@weak_script
def batch_norm(input, running_mean, running_var, weight=None, bias=None,
training=False, momentum=0.1, eps=1e-5):
# type: (Tensor, Tensor, Tensor, Optional[Tensor], Optional[Tensor], bool, float, float) -> Tensor
)
-@torch._jit_internal.weak_script
+@weak_script
def instance_norm(input, running_mean=None, running_var=None, weight=None,
bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
# type: (Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor], bool, float, float) -> Tensor # noqa
)
-@torch._jit_internal.weak_script
+@weak_script
def layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-5):
# type: (Tensor, List[int], Optional[Tensor], Optional[Tensor], float) -> Tensor
r"""Applies Layer Normalization for last certain number of dimensions.
torch.backends.cudnn.enabled)
-@torch._jit_internal.weak_script
+@weak_script
def group_norm(input, num_groups, weight=None, bias=None, eps=1e-5):
# type: (Tensor, int, Optional[Tensor], Optional[Tensor], float) -> Tensor
r"""Applies Group Normalization for last certain number of dimensions.
torch.backends.cudnn.enabled)
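# A relationship sketch between the normalization functionals (no affine
# parameters): a single group spans all channels, so group_norm with
# num_groups=1 matches layer_norm taken over every non-batch dimension:
import torch
import torch.nn.functional as F

x = torch.randn(2, 6, 4, 4)
assert torch.allclose(F.group_norm(x, num_groups=1),
                      F.layer_norm(x, x.shape[1:]), atol=1e-6)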
-@torch._jit_internal.weak_script
+@weak_script
def local_response_norm(input, size, alpha=1e-4, beta=0.75, k=1.):
# type: (Tensor, int, float, float, float) -> Tensor
r"""Applies local response normalization over an input signal composed of
# loss
-@torch._jit_internal.weak_script
+@weak_script
def ctc_loss(log_probs, targets, input_lengths, target_lengths, blank=0,
reduction='mean'):
# type: (Tensor, Tensor, Tensor, Tensor, int, str) -> Tensor
return torch.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, _Reduction.get_enum(reduction))
-@torch._jit_internal.weak_script
+@weak_script
def nll_loss(input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[Tensor], Optional[bool], int, Optional[bool], str) -> Tensor
return ret
-@torch._jit_internal.weak_script
+@weak_script
def poisson_nll_loss(input, target, log_input=True, full=False, size_average=None, eps=1e-8,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, bool, bool, Optional[bool], float, Optional[bool], str) -> Tensor
return ret
-@torch._jit_internal.weak_script
+@weak_script
def kl_div(input, target, size_average=None, reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
r"""The `Kullback-Leibler divergence`_ Loss.
return reduced
-@torch._jit_internal.weak_script
+@weak_script
def cross_entropy(input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[Tensor], Optional[bool], int, Optional[bool], str) -> Tensor
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
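# The composition above, spelled out as a check: cross_entropy(input, target)
# equals nll_loss applied to log_softmax over the class dimension:
import torch
import torch.nn.functional as F

input = torch.randn(3, 5)
target = torch.tensor([1, 0, 4])
assert torch.allclose(F.cross_entropy(input, target),
                      F.nll_loss(F.log_softmax(input, 1), target))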
-@torch._jit_internal.weak_script
+@weak_script
def binary_cross_entropy(input, target, weight=None, size_average=None,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[Tensor], Optional[bool], Optional[bool], str) -> Tensor
input, target, weight, reduction_enum)
-@torch._jit_internal.weak_script
+@weak_script
def binary_cross_entropy_with_logits(input, target, weight=None, size_average=None,
reduce=None, reduction='mean', pos_weight=None):
# type: (Tensor, Tensor, Optional[Tensor], Optional[bool], Optional[bool], str, Optional[Tensor]) -> Tensor
return lambd_optimized(expanded_input, expanded_target, _Reduction.get_enum(reduction))
-@torch._jit_internal.weak_script
+@weak_script
def _smooth_l1_loss(input, target):
# type: (Tensor, Tensor) -> Tensor
t = torch.abs(input - target)
return torch.where(t < 1, 0.5 * t ** 2, t - 0.5)
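# The helper above is the elementwise Huber-style form with t = |input - target|:
# 0.5 * t**2 for t < 1, and t - 0.5 otherwise. The two branches meet at t = 1
# with matching value (0.5) and slope (1), which is what makes the loss smooth:
import torch

t = torch.tensor([0.5, 1.0, 2.0])
loss = torch.where(t < 1, 0.5 * t ** 2, t - 0.5)  # tensor([0.1250, 0.5000, 1.5000])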
-@torch._jit_internal.weak_script
+@weak_script
def smooth_l1_loss(input, target, size_average=None, reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
r"""Function that uses a squared term if the absolute
return ret
-@torch._jit_internal.weak_script
+@weak_script
def l1_loss(input, target, size_average=None, reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
return ret
-@torch._jit_internal.weak_script
+@weak_script
def mse_loss(input, target, size_average=None, reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
return ret
-@torch._jit_internal.weak_script
+@weak_script
def margin_ranking_loss(input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Tensor, float, Optional[bool], Optional[bool], str) -> Tensor
return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum)
-@torch._jit_internal.weak_script
+@weak_script
def hinge_embedding_loss(input, target, margin=1.0, size_average=None,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, float, Optional[bool], Optional[bool], str) -> Tensor
return torch.hinge_embedding_loss(input, target, margin, reduction_enum)
-@torch._jit_internal.weak_script
+@weak_script
def multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
r"""multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum)
-@torch._jit_internal.weak_script
+@weak_script
def soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
r"""soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
return torch._C._nn.soft_margin_loss(input, target, reduction_enum)
-@torch._jit_internal.weak_script
+@weak_script
def multilabel_soft_margin_loss(input, target, weight=None, size_average=None,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[Tensor], Optional[bool], Optional[bool], str) -> Tensor
return ret
-@torch._jit_internal.weak_script
+@weak_script
def cosine_embedding_loss(input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Tensor, float, Optional[bool], Optional[bool], str) -> Tensor
return torch.cosine_embedding_loss(input1, input2, target, margin, reduction_enum)
-@torch._jit_internal.weak_script
+@weak_script
def multi_margin_loss(input, target, p=1, margin=1., weight=None, size_average=None,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, int, float, Optional[Tensor], Optional[bool], Optional[bool], str) -> Tensor
}
-@torch._jit_internal.weak_script
+@weak_script
def grid_sample(input, grid, mode='bilinear', padding_mode='zeros'):
# type: (Tensor, Tensor, str, str) -> Tensor
r"""Given an :attr:`input` and a flow-field :attr:`grid`, computes the
return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum)
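# affine_grid and grid_sample compose; an identity theta reproduces the input.
# Recent releases may additionally warn about the align_corners default:
import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 5, 5)
theta = torch.tensor([[[1., 0., 0.],
                       [0., 1., 0.]]])     # batch of identity affine matrices
grid = F.affine_grid(theta, x.size())
assert torch.allclose(F.grid_sample(x, grid), x, atol=1e-6)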
-@torch._jit_internal.weak_script
+@weak_script
def affine_grid(theta, size):
# type: (Tensor, List[int]) -> Tensor
r"""Generates a 2d flow field, given a batch of affine matrices :attr:`theta`
return vision.affine_grid_generator(theta, size)
-@torch._jit_internal.weak_script
+@weak_script
def pad(input, pad, mode='constant', value=0):
# type: (Tensor, List[int], str, float) -> Tensor
r"""Pads tensor.
# distance
-@torch._jit_internal.weak_script
+@weak_script
def pairwise_distance(x1, x2, p=2., eps=1e-6, keepdim=False):
# type: (Tensor, Tensor, float, float, bool) -> Tensor
r"""
""")
-@torch._jit_internal.weak_script
+@weak_script
def triplet_margin_loss(anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False, size_average=None,
reduce=None, reduction="mean"):
# type: (Tensor, Tensor, Tensor, float, float, float, bool, Optional[bool], Optional[bool], str) -> Tensor
swap, reduction_enum)
-@torch._jit_internal.weak_script
+@weak_script
def normalize(input, p=2, dim=1, eps=1e-12, out=None):
# type: (Tensor, float, int, float, Optional[Tensor]) -> Tensor
r"""Performs :math:`L_p` normalization of inputs over specified dimension.
assert isinstance(arg, int) or len(arg) == 2, message.format(arg_name)
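# normalize divides by the L_p norm along dim (clamped below by eps), so the
# rows of the result are unit vectors:
import torch
import torch.nn.functional as F

x = torch.randn(4, 8)
y = F.normalize(x, p=2, dim=1)
assert torch.allclose(y.norm(p=2, dim=1), torch.ones(4), atol=1e-6)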
-@torch._jit_internal.weak_script
+@weak_script
def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
# type: (Tensor, BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int]) -> Tensor # noqa
r"""Extracts sliding local blocks from an batched input tensor.
return ret
-@torch._jit_internal.weak_script
+@weak_script
def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
# type: (Tensor, BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int], BroadcastingList2[int]) -> Tensor # noqa
r"""Combines an array of sliding local blocks into a large containing