+@torch._jit_internal.weak_script
def instance_norm(input, running_mean=None, running_var=None, weight=None,
bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
# type: (Tensor, Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor], bool, float, float) -> Tensor # noqa
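For reference, a minimal usage sketch of the functional form added above; the shapes and values are illustrative, not part of this patch:

import torch
import torch.nn.functional as F

x = torch.randn(8, 3, 32, 32)      # (N, C, H, W) mini-batch
# With use_input_stats=True (the default), each sample is normalized
# using its own per-channel statistics; running stats are optional.
out = F.instance_norm(x, use_input_stats=True, eps=1e-5)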
+@torch._jit_internal.weak_script
def layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-5):
+ # type: (Tensor, List[int], Optional[Tensor], Optional[Tensor], float) -> Tensor
r"""Applies Layer Normalization for last certain number of dimensions.
See :class:`~torch.nn.LayerNorm` for details.
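A minimal sketch of calling the functional form (illustrative shapes); normalized_shape must match the trailing dimensions of the input, and weight/bias, if given, must have that same shape:

import torch
import torch.nn.functional as F

x = torch.randn(20, 5, 10)
w = torch.ones(5, 10)               # elementwise affine parameters
b = torch.zeros(5, 10)
# Normalize over the last two dimensions of x.
out = F.layer_norm(x, normalized_shape=[5, 10], weight=w, bias=b, eps=1e-5)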
+@torch._jit_internal.weak_script
def group_norm(input, num_groups, weight=None, bias=None, eps=1e-5):
+ # type: (Tensor, int, Optional[Tensor], Optional[Tensor], float) -> Tensor
r"""Applies Group Normalization for last certain number of dimensions.
See :class:`~torch.nn.GroupNorm` for details.
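A minimal usage sketch (illustrative shapes); num_groups must evenly divide the channel count:

import torch
import torch.nn.functional as F

x = torch.randn(8, 6, 16, 16)       # 6 channels -> 3 groups of 2
out = F.group_norm(x, num_groups=3, eps=1e-5)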
+@torch._jit_internal.weak_script
def binary_cross_entropy_with_logits(input, target, weight=None, size_average=None,
reduce=None, reduction='mean', pos_weight=None):
# type: (Tensor, Tensor, Optional[Tensor], Optional[bool], Optional[bool], str, Optional[Tensor]) -> Tensor
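A minimal sketch of the functional form; the tensors below are illustrative. The call fuses a sigmoid with binary cross entropy in one numerically stable step, and pos_weight rescales the positive term per output:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 2)               # raw scores, no sigmoid applied
targets = torch.empty(4, 2).random_(2)   # 0/1 labels, same shape as logits
# A pos_weight > 1 up-weights positives for the corresponding output.
loss = F.binary_cross_entropy_with_logits(logits, targets,
                                          pos_weight=torch.tensor([1.0, 3.0]))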
+@torch._jit_internal.weak_script
def margin_ranking_loss(input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean'):
+ # type: (Tensor, Tensor, Tensor, float, Optional[bool], Optional[bool], str) -> Tensor
r"""margin_ranking_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MarginRankingLoss` for details.
""" # noqa
if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_enum(size_average, reduce)
+ reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
- reduction = _Reduction.get_enum(reduction)
+ reduction_enum = _Reduction.get_enum(reduction)
if input1.dim() == 0 or input2.dim() == 0 or target.dim() == 0:
raise RuntimeError(("margin_ranking_loss does not support scalars, got sizes: "
"input1: {}, input2: {}, target: {} ".format(input1.size(), input2.size(), target.size())))
- return torch.margin_ranking_loss(input1, input2, target, margin, reduction)
+ return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum)
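A minimal usage sketch; note the scalar check above means every argument must be at least one-dimensional:

import torch
import torch.nn.functional as F

x1 = torch.randn(3, requires_grad=True)
x2 = torch.randn(3, requires_grad=True)
y = torch.tensor([1.0, -1.0, 1.0])   # +1: x1 should rank above x2; -1: below
loss = F.margin_ranking_loss(x1, x2, y, margin=0.5)
loss.backward()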
+@torch._jit_internal.weak_script
def hinge_embedding_loss(input, target, margin=1.0, size_average=None,
reduce=None, reduction='mean'):
+ # type: (Tensor, Tensor, float, Optional[bool], Optional[bool], str) -> Tensor
r"""hinge_embedding_loss(input, target, margin=1.0, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.HingeEmbeddingLoss` for details.
""" # noqa
if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_enum(size_average, reduce)
+ reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
- reduction = _Reduction.get_enum(reduction)
- return torch.hinge_embedding_loss(input, target, margin, reduction)
+ reduction_enum = _Reduction.get_enum(reduction)
+ return torch.hinge_embedding_loss(input, target, margin, reduction_enum)
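An illustrative call; the input is typically a distance, and the target uses +1 for similar pairs and -1 for dissimilar ones:

import torch
import torch.nn.functional as F

dist = torch.randn(5).abs()                 # e.g. pairwise distances
y = torch.tensor([1., -1., 1., -1., 1.])
loss = F.hinge_embedding_loss(dist, y, margin=1.0)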
+@torch._jit_internal.weak_script
def multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean'):
+ # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
r"""multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.MultiLabelMarginLoss` for details.
"""
if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_enum(size_average, reduce)
+ reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
- reduction = _Reduction.get_enum(reduction)
- return torch._C._nn.multilabel_margin_loss(input, target, reduction)
+ reduction_enum = _Reduction.get_enum(reduction)
+ return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum)
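A minimal sketch; the target has the same shape as the input and holds class indices, with -1 terminating each sample's label list:

import torch
import torch.nn.functional as F

x = torch.randn(2, 4)
y = torch.tensor([[3, 0, -1, -1],     # sample 0 is labeled with classes 3 and 0
                  [1, 2, 3, -1]])     # sample 1 with classes 1, 2 and 3
loss = F.multilabel_margin_loss(x, y)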
+@torch._jit_internal.weak_script
def soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean'):
+ # type: (Tensor, Tensor, Optional[bool], Optional[bool], str) -> Tensor
r"""soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.SoftMarginLoss` for details.
"""
if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_enum(size_average, reduce)
+ reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
- reduction = _Reduction.get_enum(reduction)
- return torch._C._nn.soft_margin_loss(input, target, reduction)
+ reduction_enum = _Reduction.get_enum(reduction)
+ return torch._C._nn.soft_margin_loss(input, target, reduction_enum)
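An illustrative call; targets are +1/-1 labels of the same shape as the input:

import torch
import torch.nn.functional as F

x = torch.randn(4)
y = torch.tensor([1., -1., 1., -1.])
loss = F.soft_margin_loss(x, y)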
+@torch._jit_internal.weak_script
def multilabel_soft_margin_loss(input, target, weight=None, size_average=None,
reduce=None, reduction='mean'):
# type: (Tensor, Tensor, Optional[Tensor], Optional[bool], Optional[bool], str) -> Tensor
return ret
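A minimal sketch of the functional form; the target holds an independent 0/1 label per class:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3)                 # one score per (sample, class)
y = torch.tensor([[1., 0., 1.],
                  [0., 1., 1.]])
loss = F.multilabel_soft_margin_loss(x, y)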
+@torch._jit_internal.weak_script
def cosine_embedding_loss(input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean'):
+ # type: (Tensor, Tensor, Tensor, float, Optional[bool], Optional[bool], str) -> Tensor
r"""cosine_embedding_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor
See :class:`~torch.nn.CosineEmbeddingLoss` for details.
""" # noqa
if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_enum(size_average, reduce)
+ reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
- reduction = _Reduction.get_enum(reduction)
- return torch.cosine_embedding_loss(input1, input2, target, margin, reduction)
+ reduction_enum = _Reduction.get_enum(reduction)
+ return torch.cosine_embedding_loss(input1, input2, target, margin, reduction_enum)
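An illustrative call on a batch of embedding pairs; a +1 target pulls a pair together, while -1 pushes it apart once cosine similarity exceeds the margin:

import torch
import torch.nn.functional as F

a = torch.randn(4, 8)
b = torch.randn(4, 8)
y = torch.tensor([1., -1., 1., -1.])
loss = F.cosine_embedding_loss(a, b, y, margin=0.5)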
+@torch._jit_internal.weak_script
def triplet_margin_loss(anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False, size_average=None,
reduce=None, reduction="mean"):
+ # type: (Tensor, Tensor, Tensor, float, float, float, bool, Optional[bool], Optional[bool], str) -> Tensor
r"""
See :class:`~torch.nn.TripletMarginLoss` for details
"""
if size_average is not None or reduce is not None:
- reduction = _Reduction.legacy_get_enum(size_average, reduce)
+ reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
else:
- reduction = _Reduction.get_enum(reduction)
+ reduction_enum = _Reduction.get_enum(reduction)
return torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps,
- swap, reduction)
+ swap, reduction_enum)
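A minimal usage sketch (illustrative shapes); p selects the distance norm, and swap=True also considers the positive-to-negative distance, keeping the harder of the two:

import torch
import torch.nn.functional as F

anchor = torch.randn(10, 128, requires_grad=True)
positive = torch.randn(10, 128, requires_grad=True)
negative = torch.randn(10, 128, requires_grad=True)
loss = F.triplet_margin_loss(anchor, positive, negative,
                             margin=1.0, p=2, swap=True)
loss.backward()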
+@torch._jit_internal.weak_script
def normalize(input, p=2, dim=1, eps=1e-12, out=None):
# type: (Tensor, float, int, float, Optional[Tensor]) -> Tensor
r"""Performs :math:`L_p` normalization of inputs over specified dimension.