_(aten, max_unpool3d_forward) \
_(aten, max_values) \
_(aten, mean) \
+_(aten, nanmean) \
_(aten, median) \
_(aten, nanmedian) \
_(aten, meshgrid) \
return at::mean_out(result, self, dimnames_to_positions(self, dim), keepdim, opt_dtype);
}
+// TODO(@heitorschueroff) implement custom kernels for nanmean
+Tensor& nanmean_out(
+ const Tensor& self,
+ IntArrayRef dim,
+ bool keepdim,
+ c10::optional<ScalarType> opt_dtype,
+ Tensor& result) {
+ TORCH_CHECK(
+ self.is_floating_point(),
+ "nanmean(): expected input to have floating point dtype but got ",
+ self.scalar_type());
+ const auto factor = at::native::isnan(self).logical_not_().sum(dim, keepdim);
+ at::native::nansum_out(self, dim, keepdim, opt_dtype, result).div_(factor);
+ return result;
+}
+
+Tensor nanmean(
+ const Tensor& self,
+ IntArrayRef dim,
+ bool keepdim,
+ c10::optional<ScalarType> opt_dtype) {
+ TORCH_CHECK(
+ self.is_floating_point(),
+ "nanmean(): expected input to have floating point dtype but got ",
+ self.scalar_type());
+ const auto factor =
+ at::native::isnan(self.detach()).logical_not_().sum(dim, keepdim);
+ return at::nansum(self, dim, keepdim, opt_dtype).div_(factor);
+}
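
Functionally, both variants above reduce to dividing nansum by the count of non-NaN elements, with the functional variant computing the count on self.detach() so it stays out of autograd. A minimal Python sketch of that composite (illustrative only; nanmean_sketch is a hypothetical helper, not the shipped kernel):

import torch

def nanmean_sketch(x, dim=None, keepdim=False):
    # Denominator: count of non-NaN elements, mirroring `factor` above.
    mask = ~torch.isnan(x.detach())
    count = mask.sum(dim, keepdim) if dim is not None else mask.sum()
    # nansum treats NaN as zero; dividing by the non-NaN count gives the mean
    # of the remaining elements. If everything is NaN, this is 0/0 -> NaN.
    total = x.nansum(dim, keepdim) if dim is not None else x.nansum()
    return total / count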
+
static Tensor squeeze_multiple(const Tensor& self, IntArrayRef dims) {
int ndims = self.sizes().size();
auto dims_to_squeeze = at::dim_list_to_bitset(dims, ndims);
- func: mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
device_check: NoCheck # TensorIterator
+- func: nanmean(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
+ device_check: NoCheck # Composite
+ variants: function, method
+
+- func: nanmean.out(Tensor self, int[1] dim=[], bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+ device_check: NoCheck # Composite
+
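The schema entries above register both the functional/method variant and the out= overload. A quick sanity check, assuming the ops are registered as written:

import torch

x = torch.tensor([1.0, float('nan'), 3.0])
print(torch.nanmean(x))    # function variant -> tensor(2.)
print(x.nanmean())         # method variant
out = torch.empty(())
torch.nanmean(x, out=out)  # out= overload; dim defaults to [] (reduce all)
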
- func: median(Tensor self) -> Tensor
variants: function, method
dispatch:
Tensor.max
Tensor.maximum
Tensor.mean
+ Tensor.nanmean
Tensor.median
Tensor.nanmedian
Tensor.min
dist
logsumexp
mean
+ nanmean
median
nanmedian
mode
See :func:`torch.argmax`
""")
-add_docstr_all('mean',
- r"""
-mean(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)
+add_docstr_all('mean', r"""
+mean(dim=None, keepdim=False, *, dtype=None) -> Tensor
See :func:`torch.mean`
""")
+add_docstr_all('nanmean', r"""
+nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor
+
+See :func:`torch.nanmean`
+""")
+
add_docstr_all('median',
r"""
median(dim=None, keepdim=False) -> (Tensor, LongTensor)
tensor([ 0, 2, 0, 1])
""".format(**single_dim_common))
-add_docstr(torch.mean,
- r"""
-mean(input) -> Tensor
+add_docstr(torch.mean, r"""
+mean(input, *, dtype=None) -> Tensor
Returns the mean value of all elements in the :attr:`input` tensor.
Args:
{input}
+Keyword args:
+ {dtype}
+
Example::
>>> a = torch.randn(1, 3)
>>> torch.mean(a)
tensor(0.3367)
-.. function:: mean(input, dim, keepdim=False, *, out=None) -> Tensor
+.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
:noindex:
Returns the mean value of each row of the :attr:`input` tensor in the given
{keepdim}
Keyword args:
+ {dtype}
{out}
+.. seealso::
+
+ :func:`torch.nanmean` computes the mean value of `non-NaN` elements.
+
Example::
>>> a = torch.randn(4, 4)
[ 0.1807]])
""".format(**multi_dim_common))
+add_docstr(torch.nanmean, r"""
+nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
+
+Computes the mean of all `non-NaN` elements along the specified dimensions.
+
+This function is identical to :func:`torch.mean` when there are no `NaN` values
+in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
+propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
+`NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).
+
+{keepdim_details}
+
+Args:
+ {input}
+ {dim} If `None`, reduces all dimensions. Default is `None`.
+ {keepdim}
+
+Keyword args:
+ {dtype}
+ {out}
+
+.. seealso::
+
+ :func:`torch.mean` computes the mean value, propagating `NaN`.
+
+Example::
+
+ >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
+ >>> x.mean()
+ tensor(nan)
+ >>> x.nanmean()
+ tensor(1.8000)
+ >>> x.mean(dim=0)
+ tensor([ nan, 1.5000, 2.5000])
+ >>> x.nanmean(dim=0)
+ tensor([1.0000, 1.5000, 2.5000])
+
+ # If all elements in the reduced dimensions are NaN then the result is NaN
+ >>> torch.tensor([torch.nan]).nanmean()
+ tensor(nan)
+""".format(**multi_dim_common))
+
add_docstr(torch.median,
r"""
median(input) -> Tensor
torch.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.mean: lambda input, dim=None: -1,
+ torch.nanmean: lambda input, dim=None, keepdim=False, dtype=None, out=None: -1,
torch.median: lambda input, dim=None: -1,
torch.nanmedian: lambda input, dim=None: -1,
torch.meshgrid: lambda *tensors, **kwargs: -1,
requires_grad=requires_grad),))
return inputs
+def _generate_nan_reduction_inputs(device, dtype, requires_grad):
+ yield from _generate_reduction_inputs(device, dtype, requires_grad)
+ yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad)
+ yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad)
+
+def sample_inputs_nan_reduction(supports_multiple_dims):
+ # Generates sample inputs for reduction ops: each sample holds the input
+ # tensor plus dim and keepdim kwargs. If a reduction op needs to test
+ # additional args/kwargs, create a separate sample_inputs function.
+ def fn(op_info, device, dtype, requires_grad):
+ inputs = []
+
+ for t in _generate_nan_reduction_inputs(device, dtype, requires_grad):
+ # Add case without dim and keepdim kwargs
+ inputs.append(SampleInput(t))
+ for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims):
+ inputs.append(SampleInput(t, kwargs=kwargs))
+
+ return inputs
+
+ return fn
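
The factory returns a closure with the standard (op_info, device, dtype, requires_grad) signature, so it can be passed directly as sample_inputs_func, as the nanmean OpInfo below does. A small sketch of enumerating what it yields, assuming SampleInput exposes .input and .kwargs:

import torch

fn = sample_inputs_nan_reduction(supports_multiple_dims=True)
for sample in fn(None, 'cpu', torch.float32, False):
    # Each sample pairs a (possibly NaN-containing) tensor with dim/keepdim kwargs.
    print(sample.input.shape, sample.kwargs)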
+
def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad):
test_quantiles = (0.5, make_tensor((2,), device, dtype, low=0, high=1))
test_interpolations = ['linear', 'midpoint']
),
),
ReductionOpInfo(
+ 'nanmean',
+ nan_policy='omit',
+ assert_autodiffed=True,
+ promotes_int_to_float=True,
+ dtypes=floating_types_and(torch.float16, torch.bfloat16),
+ sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True),
+ ref=reference_reduction_numpy(np.nanmean),
+ skips=(
+ # RuntimeError: deepEquals(input.iValue, deepCopiedInput)INTERNAL ASSERT FAILED at
+ # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":142, please report a bug to PyTorch.
+ SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ # FIXME: nanmean reduces all dimensions when dim=[]
+ SkipInfo('TestReductions', 'test_dim_empty'),
+ SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
+ # FIXME: improve precision
+ SkipInfo('TestReductions', 'test_noncontiguous_all',
+ dtypes=[torch.float16]),
+ SkipInfo('TestReductions', 'test_ref_small_input',
+ dtypes=[torch.float16]),
+ SkipInfo('TestReductions', 'test_ref_duplicate_values',
+ device_type='cuda', dtypes=[torch.float16]),
+ SkipInfo('TestReductions', 'test_ref_extremal_values',
+ device_type='cuda', dtypes=[torch.complex64]),
+ ),
+ ),
+ ReductionOpInfo(
'prod',
identity=1,
nan_policy='propagate',