     .. _Fractional MaxPooling:
         http://arxiv.org/abs/1412.6071
     """
-    if has_torch_function_unary(input):
+    if has_torch_function_variadic(input, _random_samples):
         return handle_torch_function(
             fractional_max_pool2d_with_indices,
-            (input,),
+            (input, _random_samples),
             input,
             kernel_size,
             output_size=output_size,
     return_indices: bool = False,
     _random_samples: Optional[Tensor] = None
 ) -> Tensor:
-    if has_torch_function_unary(input):
+    if has_torch_function_variadic(input, _random_samples):
         return handle_torch_function(
             fractional_max_pool2d,
-            (input,),
+            (input, _random_samples),
             input,
             kernel_size,
             output_size=output_size,
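
Net effect of these two hunks (and their 3d twins below): an override that enters only through `_random_samples` is now seen at the level of the public functional, not just at the low-level aten op. A minimal sketch of the observable difference; the `TracedTensor` subclass and the shapes are illustrative, not part of this patch:

    import torch
    import torch.nn.functional as F

    class TracedTensor(torch.Tensor):
        # Collects every function object that reaches __torch_function__.
        calls = []

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            cls.calls.append(func)
            return super().__torch_function__(func, types, args, kwargs or {})

    x = torch.randn(1, 3, 8, 8)                              # plain Tensor input
    samples = torch.rand(1, 3, 2).as_subclass(TracedTensor)  # override enters only here

    F.fractional_max_pool2d(x, kernel_size=2, output_size=4, _random_samples=samples)

    # Before this patch the subclass was only consulted for the low-level op;
    # with the variadic check the public functional itself dispatches too.
    print(any(f is F.fractional_max_pool2d for f in TracedTensor.calls))  # True

Note that `has_torch_function_variadic` only reacts to arguments whose type overrides `__torch_function__`, so the default `_random_samples=None` passes through the new check harmlessly.
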
     .. _Fractional MaxPooling:
         http://arxiv.org/abs/1412.6071
     """
-    if has_torch_function_unary(input):
+    if has_torch_function_variadic(input, _random_samples):
         return handle_torch_function(
             fractional_max_pool3d_with_indices,
-            (input,),
+            (input, _random_samples),
             input,
             kernel_size,
             output_size=output_size,
     return_indices: bool = False,
     _random_samples: Optional[Tensor] = None
 ) -> Tensor:
-    if has_torch_function_unary(input):
+    if has_torch_function_variadic(input, _random_samples):
         return handle_torch_function(
             fractional_max_pool3d,
-            (input,),
+            (input, _random_samples),
             input,
             kernel_size,
             output_size=output_size,
         - Bias: :math:`(out\_features)`
         - Output: :math:`(N, *, out\_features)`
     """
-    if has_torch_function_variadic(input, weight):
-        return handle_torch_function(linear, (input, weight), input, weight, bias=bias)
+    if has_torch_function_variadic(input, weight, bias):
+        return handle_torch_function(linear, (input, weight, bias), input, weight, bias=bias)
     return torch._C._nn.linear(input, weight, bias)
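
The same pattern for `linear` matters for wrapped parameters: the override can now ride in on `weight` or `bias` alone. A small sketch reusing the same kind of logging subclass (illustrative names, not from this patch):

    import torch
    import torch.nn.functional as F

    class TracedTensor(torch.Tensor):
        calls = []

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            cls.calls.append(func)
            return super().__torch_function__(func, types, args, kwargs or {})

    x = torch.randn(2, 3)                         # plain Tensor input
    w = torch.randn(4, 3)                         # plain Tensor weight
    b = torch.randn(4).as_subclass(TracedTensor)  # override only on bias

    y = F.linear(x, w, b)
    # Previously only torch._C._nn.linear reached the subclass; now the
    # public F.linear dispatches as well.
    print(any(f is F.linear for f in TracedTensor.calls))  # True with this patch
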
         - output: :math:`(N, *, H_{out})` where :math:`H_{out}=\text{out\_features}`
           and all but the last dimension are the same shape as the input.
     """
-    if has_torch_function_variadic(input1, input2, weight):
+    if has_torch_function_variadic(input1, input2, weight, bias):
         return handle_torch_function(
             bilinear,
-            (input1, input2, weight),
+            (input1, input2, weight, bias),
             input1, input2, weight,
             bias=bias
         )
         tensor([[ 0.0000,  0.0000,  0.0000],
                 [-0.7082,  3.2145, -2.6251]])
     """
-    if has_torch_function_variadic(input, weight):
+    if has_torch_function_variadic(input, weight, offsets, per_sample_weights):
         return handle_torch_function(
             embedding_bag,
-            (input, weight),
+            (input, weight, offsets, per_sample_weights),
             input,
             weight,
             offsets=offsets,
     See :class:`~torch.nn.BatchNorm1d`, :class:`~torch.nn.BatchNorm2d`,
     :class:`~torch.nn.BatchNorm3d` for details.
     """
-    if has_torch_function_unary(input):
+    if has_torch_function_variadic(input, running_mean, running_var, weight, bias):
         return handle_torch_function(
             batch_norm,
-            (input,),
+            (input, running_mean, running_var, weight, bias),
             input,
             running_mean,
             running_var,
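
For the norm functions the newly checked tensors are the optional affine parameters and the running statistics, so a subclass used as a buffer (e.g. for tracking or sharding `running_mean`/`running_var`) no longer bypasses dispatch on the public API. Sketch under the same illustrative subclass:

    import torch
    import torch.nn.functional as F

    class TracedTensor(torch.Tensor):
        calls = []

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            cls.calls.append(func)
            return super().__torch_function__(func, types, args, kwargs or {})

    x = torch.randn(4, 3)
    running_mean = torch.zeros(3).as_subclass(TracedTensor)
    running_var = torch.ones(3).as_subclass(TracedTensor)

    F.batch_norm(x, running_mean, running_var, training=False)
    print(any(f is F.batch_norm for f in TracedTensor.calls))  # True with this patch
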
     See :class:`~torch.nn.InstanceNorm1d`, :class:`~torch.nn.InstanceNorm2d`,
     :class:`~torch.nn.InstanceNorm3d` for details.
     """
-    if has_torch_function_unary(input):
+    if has_torch_function_variadic(input, running_mean, running_var, weight, bias):
         return handle_torch_function(
             instance_norm,
-            (input,),
+            (input, running_mean, running_var, weight, bias),
             input,
             running_mean=running_mean,
             running_var=running_var,
     See :class:`~torch.nn.LayerNorm` for details.
     """
-    if has_torch_function_unary(input):
+    if has_torch_function_variadic(input, weight, bias):
         return handle_torch_function(
-            layer_norm, (input,), input, normalized_shape, weight=weight, bias=bias, eps=eps
+            layer_norm, (input, weight, bias), input, normalized_shape, weight=weight, bias=bias, eps=eps
         )
     return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)
     See :class:`~torch.nn.GroupNorm` for details.
     """
-    if has_torch_function_unary(input):
-        return handle_torch_function(group_norm, (input,), input, num_groups, weight=weight, bias=bias, eps=eps)
+    if has_torch_function_variadic(input, weight, bias):
+        return handle_torch_function(group_norm, (input, weight, bias), input, num_groups, weight=weight, bias=bias, eps=eps)
     _verify_batch_size([input.size(0) * input.size(1) // num_groups, num_groups] + list(input.size()[2:]))
     return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)
         >>> output = F.nll_loss(F.log_softmax(input), target)
         >>> output.backward()
     """
-    if has_torch_function_variadic(input, target):
+    if has_torch_function_variadic(input, target, weight):
         return handle_torch_function(
             nll_loss,
-            (input, target),
+            (input, target, weight),
             input,
             target,
             weight=weight,
         >>> loss = F.cross_entropy(input, target)
         >>> loss.backward()
     """
-    if has_torch_function_variadic(input, target):
+    if has_torch_function_variadic(input, target, weight):
         return handle_torch_function(
             cross_entropy,
-            (input, target),
+            (input, target, weight),
             input,
             target,
             weight=weight,
         >>> loss = F.binary_cross_entropy(F.sigmoid(input), target)
         >>> loss.backward()
     """
-    if has_torch_function_variadic(input, target):
+    if has_torch_function_variadic(input, target, weight):
         return handle_torch_function(
             binary_cross_entropy,
-            (input, target),
+            (input, target, weight),
             input,
             target,
             weight=weight,
         >>> loss = F.binary_cross_entropy_with_logits(input, target)
         >>> loss.backward()
     """
-    if has_torch_function_variadic(input, target):
+    if has_torch_function_variadic(input, target, weight, pos_weight):
         return handle_torch_function(
             binary_cross_entropy_with_logits,
-            (input, target),
+            (input, target, weight, pos_weight),
             input,
             target,
             weight=weight,
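
The loss hunks all follow the same recipe; `binary_cross_entropy_with_logits` is the one that gains two dispatch-relevant tensors, `weight` and `pos_weight`. An illustrative check (subclass and shapes are ours, not from the patch):

    import torch
    import torch.nn.functional as F

    class TracedTensor(torch.Tensor):
        calls = []

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            cls.calls.append(func)
            return super().__torch_function__(func, types, args, kwargs or {})

    input = torch.randn(3)
    target = torch.empty(3).random_(2)
    pos_weight = torch.ones(3).as_subclass(TracedTensor)  # override only here

    F.binary_cross_entropy_with_logits(input, target, pos_weight=pos_weight)
    print(any(f is F.binary_cross_entropy_with_logits
              for f in TracedTensor.calls))  # True with this patch
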
     See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details.
     """
-    if has_torch_function_variadic(input, target):
+    if has_torch_function_variadic(input, target, weight):
         return handle_torch_function(
             multilabel_soft_margin_loss,
-            (input, target),
+            (input, target, weight),
             input,
             target,
             weight=weight,
     See :class:`~torch.nn.MultiMarginLoss` for details.
     """
-    if has_torch_function_variadic(input, target):
+    if has_torch_function_variadic(input, target, weight):
         return handle_torch_function(
             multi_margin_loss,
-            (input, target),
+            (input, target, weight),
             input,
             target,
             p=p,
         out (Tensor, optional): the output tensor. If :attr:`out` is used, this
                                 operation won't be differentiable.
     """
-    if has_torch_function_unary(input):
-        return handle_torch_function(normalize, (input,), input, p=p, dim=dim, eps=eps, out=out)
+    if has_torch_function_variadic(input, out):
+        return handle_torch_function(normalize, (input, out), input, p=p, dim=dim, eps=eps, out=out)
     if out is None:
         denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)
         return input / denom
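
`normalize` is the one case where the extra argument is an `out=` buffer rather than a parameter; a subclass passed only as `out` is now enough to trigger dispatch on the public functional. Sketch:

    import torch
    import torch.nn.functional as F

    class TracedTensor(torch.Tensor):
        calls = []

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            cls.calls.append(func)
            return super().__torch_function__(func, types, args, kwargs or {})

    x = torch.randn(2, 3)
    buf = torch.empty(2, 3).as_subclass(TracedTensor)  # override only on `out`

    F.normalize(x, p=2.0, dim=1, out=buf)
    # Previously only the underlying torch.div(..., out=buf) reached the
    # subclass; now F.normalize itself does.
    print(any(f is F.normalize for f in TracedTensor.calls))  # True with this patch
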