def skips_mvlgamma(skip_redundant=False):
skips = (
        # Values outside the domain are a hard error for the mvlgamma op.
- SkipInfo('TestUnaryUfuncs', 'test_float_domains'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'),
)
if skip_redundant:
# Redundant tests
skips = skips + ( # type: ignore[assignment]
- SkipInfo('TestGradients'),
- SkipInfo('TestJit'),
- SkipInfo('TestCommon'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
)
return skips
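
# Context for the rewrite below: the deprecated SkipInfo helper is being replaced by
# explicit DecorateInfo entries whose decorator is unittest.skip("Skipped!"). The class
# below is only a minimal, illustrative sketch of what such a DecorateInfo provides (the
# real class in torch.testing._internal.common_methods_invocations may differ in detail);
# it shows how cls_name, test_name, device_type, dtypes, and active_if gate whether the
# decorators are applied to a given test instance.
class DecorateInfoSketch:
    def __init__(self, decorators, cls_name=None, test_name=None, *,
                 device_type=None, dtypes=None, active_if=True):
        # Accept either a single decorator (e.g. unittest.skip("Skipped!")) or a sequence.
        self.decorators = list(decorators) if isinstance(decorators, (list, tuple)) else [decorators]
        self.cls_name = cls_name
        self.test_name = test_name
        self.device_type = device_type
        self.dtypes = dtypes
        self.active_if = active_if

    def is_active(self, cls_name, test_name, device_type, dtype):
        # The decorators apply only when every provided filter matches the test instance.
        return (self.active_if
                and (self.cls_name is None or self.cls_name == cls_name)
                and (self.test_name is None or self.test_name == test_name)
                and (self.device_type is None or self.device_type == device_type)
                and (self.dtypes is None or dtype in self.dtypes))
# With this shape in mind, every "- SkipInfo(...)" / "+ DecorateInfo(unittest.skip(...), ...)"
# pair in the hunks below is a mechanical, behavior-preserving rewrite.
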
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat]),
# Reference: https://github.com/pytorch/pytorch/issues/49224
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
                       # TODO: Fix test_out_arg_all_dtypes so the out= tensor is created as torch.empty_like(expected_output), where expected_output=op(input).
                       # This may break the logic of the loop over all possible types, but that is OK.
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449
- SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes',
- dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes',
+ dtypes=[torch.cfloat, torch.cdouble]),
),
supports_inplace_autograd=False,
assert_autodiffed=True,
torch.complex64: 1e-2}),),
safe_casts_outputs=True,
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestGradients', 'test_fn_grad',
- dtypes=[torch.cdouble], active_if=IS_WINDOWS),
- SkipInfo('TestGradients', 'test_method_grad',
- dtypes=[torch.cdouble], active_if=IS_WINDOWS),
- SkipInfo('TestGradients', 'test_inplace_grad',
- dtypes=[torch.cdouble], active_if=IS_WINDOWS),
- SkipInfo('TestGradients', 'test_forward_mode_AD',
- dtypes=[torch.cdouble], active_if=IS_WINDOWS),
- SkipInfo('TestGradients', 'test_inplace_forward_mode_AD',
- dtypes=[torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
+ dtypes=[torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad',
+ dtypes=[torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_grad',
+ dtypes=[torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
+ dtypes=[torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_inplace_forward_mode_AD',
+ dtypes=[torch.cdouble], active_if=IS_WINDOWS),
)),
# NOTE: the derivative for inplace acosh is not implemented
UnaryUfuncInfo('acosh',
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cuda', dtypes=[torch.cdouble],
- active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cuda', dtypes=[torch.cdouble],
- active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cuda', dtypes=[torch.cdouble],
- active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cuda', dtypes=[torch.cdouble],
+ active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cuda', dtypes=[torch.cdouble],
+ active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cuda', dtypes=[torch.cdouble],
+ active_if=IS_WINDOWS),
# Reference: https://github.com/pytorch/pytorch/issues/50692
- SkipInfo('TestGradients', 'test_fn_grad',
- device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
- SkipInfo('TestGradients', 'test_method_grad',
- device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
- SkipInfo('TestGradients', 'test_forward_mode_AD',
- dtypes=[torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
+ device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_method_grad',
+ device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
+ dtypes=[torch.cdouble]),
)),
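
# For orientation, a rough sketch (illustrative only, not the actual device-generic test
# framework in torch.testing._internal.common_device_type) of how entries like the skips
# above are consumed: each matching DecorateInfo contributes its decorators, and a
# unittest.skip decorator simply turns the matching test into a skip. Going through
# DecorateInfo also allows non-skip decorators such as precisionOverride to be attached
# the same way.
def apply_op_decorators(test_fn, decorate_infos, cls_name, test_name, device_type, dtype):
    # decorate_infos is assumed to be an iterable of DecorateInfo-like objects
    # (see the DecorateInfoSketch above); test_fn is the undecorated test function.
    for info in decorate_infos:
        if info.is_active(cls_name, test_name, device_type, dtype):
            for decorator in info.decorators:
                test_fn = decorator(test_fn)
    return test_fn
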
BinaryUfuncInfo('add',
# NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
- SkipInfo('TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# addbmm does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
# https://github.com/pytorch/pytorch/issues/55907
- SkipInfo('TestCommon', 'test_variant_consistency_eager'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_addbmm),
OpInfo('baddbmm',
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
- SkipInfo('TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# baddbmm does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_baddbmm),
OpInfo('dot',
skips=(
# FIXME: bfloat16 backward support likely depends on CUDA11+
# and SM53+
- SkipInfo('TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', active_if=IS_WINDOWS),
# bmm does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_bmm),
OpInfo('mv',
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if CUDA11OrLater else []),
skips=(
               # mv does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),),
assert_autodiffed=True,
sample_inputs_func=sample_inputs_mv),
OpInfo('addr',
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/50747
- SkipInfo('TestCommon', 'test_variant_consistency_eager',
- dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
+ dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)),
),
sample_inputs_func=sample_inputs_addr,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL),
supports_inplace_autograd=False,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
- SkipInfo('TestCommon', 'test_variant_consistency_eager'),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
OpInfo('addcdiv',
dtypes=floating_and_complex_types(),
supports_forward_ad=True,
skips=(
# TODO: update sample inputs with for_inplace_variant kwarg to support this test
- SkipInfo('TestCommon', 'test_variant_consistency_eager'),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),),
sample_inputs_func=sample_inputs_addcmul_addcdiv),
UnaryUfuncInfo('asin',
aliases=('arcsin', ),
precisionOverride({torch.bfloat16: 1e-2}),
],
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cuda', dtypes=[torch.cdouble],
- active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cuda', dtypes=[torch.cdouble],
- active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cuda', dtypes=[torch.cdouble],
+ active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cuda', dtypes=[torch.cdouble],
+ active_if=IS_WINDOWS),
)),
# NOTE: derivative for inplace asinh is not implemented
UnaryUfuncInfo('asinh',
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cuda', dtypes=[torch.cdouble],
- active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cuda', dtypes=[torch.cdouble],
- active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cuda', dtypes=[torch.cdouble],
+ active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cuda', dtypes=[torch.cdouble],
+ active_if=IS_WINDOWS),
# Complex gradcheck tests asinh at points 0 + ix for x > 1 which are points
# where asinh is not differentiable
- SkipInfo('TestGradients', 'test_forward_mode_AD',
- dtypes=complex_types()),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD',
+ dtypes=complex_types()),
)),
UnaryUfuncInfo('atan',
aliases=('arctan', ),
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
safe_casts_outputs=True,
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
- active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
- active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
- active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=IS_WINDOWS),
)),
OpInfo('atan2',
dtypes=all_types_and(torch.bool),
supports_inplace_autograd=False,
supports_forward_ad=True,
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cpu', dtypes=[torch.cfloat]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
- active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cuda', dtypes=[torch.cfloat],
- active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cpu', dtypes=[torch.cfloat]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cuda', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cuda', dtypes=[torch.cfloat],
+ active_if=IS_WINDOWS),
)),
OpInfo('broadcast_to',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
test_neg_view=False,
skips=(
# Gradcheck for complex generates invalid inputs for this function
- SkipInfo('TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),)),
OpInfo('cholesky_inverse',
dtypes=floating_and_complex_types(),
backward_dtypes=floating_types(),
skips=(
# TODO: FIXME: cholesky_inverse throws an error in forward when requires_grad=True
# for complex tensors
- SkipInfo('TestCommon', 'test_dtypes'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# cholesky_inverse does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),)),
OpInfo('chunk',
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=sample_inputs_chunk,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/54841
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.bfloat16]),
),
sample_kwargs=sample_kwargs_clamp_scalar,
sample_inputs_func=sample_inputs_clamp_scalar),
torch.bfloat16, torch.half),
supports_forward_ad=True,
skips=(
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )),
)),
OpInfo('resolve_conj',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-2}),),
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',
- dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu',
+ dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
UnaryUfuncInfo('cosh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/48641
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.int8]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu',
- dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', device_type='cpu',
- dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.int8]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu',
+ dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu',
+ dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS),
)),
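
# The cosh entry above (and the sinh entry further down) uses
# np_unary_ufunc_integer_promotion_wrapper as its NumPy reference. The sketch below is an
# assumption about what such a wrapper does (the real helper may differ): it casts
# integer/bool inputs to NumPy's analogue of torch.get_default_dtype() before calling the
# NumPy ufunc, mirroring PyTorch's integer -> default-float promotion for unary ufuncs.
import numpy as np
import torch
from functools import wraps

def np_integer_promotion_wrapper_sketch(fn):
    @wraps(fn)
    def wrapped(x):
        # Query the default dtype at call time, since it can be changed globally.
        np_default = np.float32 if torch.get_default_dtype() == torch.float32 else np.float64
        if np.issubdtype(x.dtype, np.integer) or x.dtype == np.bool_:
            return fn(x.astype(np_default))
        return fn(x)
    return wrapped
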
OpInfo('cov',
dtypes=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_out=False,
supports_forward_ad=True,
# JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507)
- skips=(SkipInfo('TestJit', 'test_variant_consistency_jit'),)),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),)),
OpInfo('cross',
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half),
skips=(
# AssertionError: UserWarning not triggered :
# Resized a non-empty tensor but did not warn about it.
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('cumsum',
dtypesIfCPU=all_types_and_complex(),
supports_forward_ad=True,
skips=(
               # cumsum does not correctly handle out= dtypes
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
),
sample_inputs_func=sample_inputs_cumulative_ops),
OpInfo('cumprod',
supports_forward_ad=True,
skips=(
               # cumprod does not correctly handle out= dtypes
- SkipInfo('TestCommon', 'test_out',
- dtypes=[torch.float32]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
+ dtypes=[torch.float32]),
),
# gradgradcheck fails in fast_mode=True: #56275
sample_inputs_func=sample_inputs_cumprod,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
OpInfo('diff',
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/59174
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/59174
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
assert_autodiffed=True,
rhs_make_tensor_kwargs=dict(exclude_zero=True)),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50093#pullrequestreview-561791547
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/issues/48010
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
),
assert_autodiffed=True,
supports_forward_ad=True,
sample_inputs_func=sample_inputs_expand_as,
skips=(
# Because expand_as does not have a function variant.
- SkipInfo('TestJit', 'test_variant_consistency_jit'),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),),
supports_out=False),
OpInfo('diag',
dtypes=all_types_and_complex_and(torch.bool),
skips=(
                       # Skips the tests below because torch.frexp returns a tuple-like (mantissa, exponent) output,
                       # while these tests currently require the output to be a single tensor.
- SkipInfo('TestUnaryUfuncs', 'test_batch_vs_slicing'),
- SkipInfo('TestUnaryUfuncs', 'test_contig_vs_every_other'),
- SkipInfo('TestUnaryUfuncs', 'test_contig_vs_transposed'),
- SkipInfo('TestUnaryUfuncs', 'test_non_contig_expand'),
- SkipInfo('TestUnaryUfuncs', 'test_variant_consistency'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'),
                       # Skips test_reference_numerics due to an error in Windows CI:
                       # np.frexp returns the exponent as np.intc dtype on Windows,
                       # and np.intc does not have a corresponding torch dtype.
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ active_if=IS_WINDOWS),
)),
OpInfo('ge',
aliases=('greater_equal',),
supports_forward_ad=True,
skips=(
# Skip since real and imag don't have out variants.
- SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('gradient',
dtypes=floating_and_complex_types_and(torch.int8, torch.int16,
skips=(
               # The following tests give a runtime error with an undefined value tensor;
               # see the discussion: https://github.com/pytorch/pytorch/issues/56660
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
+ dtypes=(torch.float32, torch.complex64)),
),
supports_inplace_autograd=False,
sample_inputs_func=sample_inputs_gradient),
skips=(
# Will be removed once https://github.com/pytorch/pytorch/issues/62328 is fixed
# Probable fix (open PR): https://github.com/pytorch/pytorch/pull/62570
- SkipInfo('TestGradients', 'test_fn_grad', device_type='cuda', dtypes=(torch.complex128,)),
- SkipInfo('TestCommon', 'test_dtypes'),
- SkipInfo('TestGradients', 'test_fn_gradgrad'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad', device_type='cuda',
+ dtypes=(torch.complex128,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
# This test fails because singular inputs cannot be reliably
# generated unless we're using double types
- SkipInfo('TestOpInfo', 'test_unsupported_dtypes'),
- SkipInfo('TestOpInfo', 'test_unsupported_backward',
- dtypes=(torch.float32, torch.complex64,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', 'test_unsupported_dtypes'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', 'test_unsupported_backward',
+ dtypes=(torch.float32, torch.complex64,)),
)),
OpInfo('linalg.cholesky',
aten_name='linalg_cholesky',
test_neg_view=False,
skips=(
# Gradcheck for complex generates invalid inputs for this function
- SkipInfo('TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),),
),
OpInfo('linalg.cholesky_ex',
aten_name='linalg_cholesky_ex',
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# Gradcheck for complex generates invalid inputs for this function
- SkipInfo('TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD', dtypes=complex_types()),),
),
OpInfo('linalg.cond',
aten_name='linalg_cond',
supports_autograd=False,
decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],
skips=(
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('linalg.matrix_power',
aliases=('matrix_power',),
aten_name='linalg_norm',
skips=(
# linalg.norm does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('linalg.matrix_norm',
aten_name='linalg_matrix_norm',
sample_inputs_func=sample_inputs_linalg_matrix_norm,
skips=(
# linalg.matrix_norm does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('linalg.qr',
aten_name='linalg_qr',
aten_name='linalg_vector_norm',
skips=(
# linalg.vector_norm does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
UnaryUfuncInfo('log',
ref=np.log,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 5e-2}),),
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('log10',
ref=np.log10,
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=IS_WINDOWS),
)),
UnaryUfuncInfo('log1p',
ref=np.log1p,
supports_forward_ad=True,
decorators=(precisionOverride({torch.bfloat16: 1e-1}),),
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ dtypes=[torch.cfloat, torch.cdouble]),
)),
OpInfo('logaddexp',
dtypes=floating_types(),
# torch.bool
# >>> t.logical_not_().dtype
# torch.float32
- SkipInfo('TestUnaryUfuncs', 'test_variant_consistency',
- dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
- SkipInfo('TestCommon', 'test_variant_consistency_eager',
- dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency',
+ dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager',
+ dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)),
)),
OpInfo('lt',
aliases=('less',),
decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, skipCPUIfNoLapack],
skips=(
# we skip jit tests because `lu` is a torch function
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('lu_solve',
op=torch.lu_solve,
skips=(
# cuda gradchecks are slow
# see discussion https://github.com/pytorch/pytorch/pull/47761#issuecomment-747316775
- SkipInfo('TestGradients', 'test_fn_gradgrad', device_type='cuda'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad', device_type='cuda'),
)),
OpInfo('masked_fill',
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
sample_inputs_func=sample_inputs_matmul,
skips=(
# matmul does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('max',
op=torch.max,
supports_forward_ad=True,
skips=(
# max does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),)),
OpInfo('max',
op=torch.max,
variant_test_name='reduction_no_dim',
supports_forward_ad=True,
skips=(
               # TODO: FIXME: complex inputs that require grad raise an error in forward
- SkipInfo('TestCommon', 'test_dtypes'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# TODO: review with var_mean tests in test_autograd.py
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
- SkipInfo('TestGradients', 'test_fn_grad'),
- SkipInfo('TestGradients', 'test_fn_gradgrad'),
- SkipInfo('TestGradients', 'test_forward_mode_AD'))),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'))),
OpInfo('std_mean',
dtypes=floating_and_complex_types_and(torch.half),
dtypesIfCPU=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_forward_ad=True,
skips=(
               # TODO: FIXME: complex inputs that require grad raise an error in forward
- SkipInfo('TestCommon', 'test_dtypes'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# TODO: fix along with var_mean autograd tests
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
- SkipInfo('TestGradients', 'test_fn_grad'),
- SkipInfo('TestGradients', 'test_fn_gradgrad'),
- SkipInfo('TestGradients', 'test_forward_mode_AD'))),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_forward_mode_AD'))),
OpInfo('meshgrid',
variant_test_name='variadic_tensors',
# Our implementation corresponds to "ij" indexing for
sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'),
skips=[
# JIT does not support variadic tensors.
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
- SkipInfo('TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
# Skip operator schema test because this is a functional and not an operator
- SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
],
supports_out=False,
supports_forward_ad=True),
# meshgrid is defined in torch.functional to take a
# variadic list of tensors. Variadic parameters are not
# compatible with the normalize operator tests.
- SkipInfo('TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),
],
assert_autodiffed=True,
supports_out=False,
supports_forward_ad=True,
skips=(
# min does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('min',
op=torch.min,
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":159,
# please report a bug to PyTorch.
- SkipInfo('TestJit', 'test_variant_consistency_jit',),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
)),
OpInfo('aminmax',
ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)),
sample_inputs_func=sample_inputs_aminmax,
skips=(
# FIXME: aminmax does not check for safe casting to output
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('nn.functional.cosine_similarity',
aten_name="cosine_similarity",
dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
skips=(
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch.
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False,),
OpInfo('nn.functional.layer_norm',
               # There are multiple aten ops, namely reflection_pad_{1,2,3}d,
               # so we can't use the aten_name argument in opinfo.
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
               # There are multiple aten ops, namely replication_pad_{1,2,3}d,
               # so we can't use the aten_name argument in opinfo.
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
supports_out=False),
skips=(
# Doesn't have a corresponding aten operator.
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
supports_out=False),
OpInfo('nn.functional.hardswish',
sample_inputs_func=sample_inputs_nn_unfold,
skips=(
# JIT alias info internal asserts here
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'),
skips=(
# JIT alias info internal asserts here
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
sample_inputs_func=partial(sample_inputs_interpolate, 'linear'),
skips=(
# JIT alias info internal asserts here
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'),
skips=(
# JIT alias info internal asserts here
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# JIT alias info internal asserts here
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'),
skips=(
# JIT alias info internal asserts here
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.interpolate',
gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
skips=(
# JIT alias info internal asserts here
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
supports_out=False),
OpInfo('nn.functional.leaky_relu',
sample_inputs_func=sample_inputs_topk,
skips=(
               # topk does not raise a warning when the out= tensor is resized
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
    # We have to add two OpInfo entries for `igamma` and `igammac`. The first is the
    # standard entry; the second runs gradcheck tests on the second argument.
supports_inplace_autograd=False,
skips=(
               # The test does not work when a lambda is passed for op
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
               # The test fails because we permute the arguments for the function variant
               # but not for the inplace or method variants.
- SkipInfo('TestCommon', 'test_variant_consistency_eager'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_igamma_igammac),
OpInfo('igammac',
supports_inplace_autograd=False,
skips=(
               # The test does not work when a lambda is passed for op
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
               # The test fails because we permute the arguments for the function variant
               # but not for the inplace or method variants.
- SkipInfo('TestCommon', 'test_variant_consistency_eager'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
),
sample_inputs_func=sample_inputs_igamma_igammac),
OpInfo('nn.functional.hardshrink',
sample_inputs_func=sample_inputs_mm,
skips=(
# mm does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('mode',
op=torch.mode,
MvlGammaInfo(variant_test_name='mvlgamma_p_3',
domain=(2, None),
skips=skips_mvlgamma(skip_redundant=True) + (
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=(torch.float16,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})),
MvlGammaInfo(variant_test_name='mvlgamma_p_5',
domain=(3, None),
skips=skips_mvlgamma(skip_redundant=True) + (
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=(torch.float16,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=(torch.float16,)),
),
sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})),
OpInfo('ne',
sample_inputs_func=sample_inputs_dist,
skips=(
# dist does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('outer',
op=torch.outer,
sample_inputs_func=sample_inputs_pow,
supports_forward_ad=True,
skips=(
- SkipInfo('TestMathBits', 'test_conj_view', device_type='cuda'),),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view', device_type='cuda'),),),
OpInfo('qr',
op=torch.qr,
dtypes=floating_and_complex_types(),
dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/51283#issuecomment-770614273
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.bfloat16]),
),
safe_casts_outputs=True),
UnaryUfuncInfo('real',
supports_forward_ad=True,
skips=(
# Skip since real and imag don't have out variants.
- SkipInfo('TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'),
)),
OpInfo('roll',
ref=np.roll,
torch.float16: 1e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/49133
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- dtypes=[torch.cfloat]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ dtypes=[torch.cfloat]),
)),
UnaryUfuncInfo('sinh',
ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh),
supports_forward_ad=True,
decorators=(precisionOverride({torch.float16: 1e-2}),),
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=(IS_MACOS or IS_WINDOWS)),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=(IS_MACOS or IS_WINDOWS)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=(IS_MACOS or IS_WINDOWS)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=(IS_MACOS or IS_WINDOWS)),
# Reference: https://github.com/pytorch/pytorch/issues/48641
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.int8]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.int8]),
)),
UnaryUfuncInfo('sign',
ref=reference_sign,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
)),
UnaryUfuncInfo('sgn',
ref=reference_sgn,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/41245
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]),
# Reference: https://github.com/pytorch/pytorch/issues/53958
                       # The test fails in the NaN comparison because `equal_nan` is True when
                       # comparing the CPU tensors.
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.complex64, torch.complex128]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.complex64, torch.complex128]),
# Reference: https://github.com/pytorch/pytorch/issues/48486
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.complex64])
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.complex64])
)),
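
# reference_sgn (used by the sgn entry above) has no direct NumPy equivalent for complex
# inputs, since np.sign does not compute z / |z| for a complex z. The function below is a
# hedged sketch of such a reference (the actual reference_sgn helper may be implemented
# differently): sgn(z) = z / |z| for z != 0 and 0 otherwise, falling back to np.sign for
# real dtypes.
import numpy as np

def reference_sgn_sketch(x):
    if not np.issubdtype(x.dtype, np.complexfloating):
        return np.sign(x)
    abs_x = np.abs(x)
    # Guard the division so sgn(0) is exactly 0 without a divide-by-zero warning.
    safe_abs = np.where(abs_x == 0, 1, abs_x)
    return np.where(abs_x == 0, 0, x / safe_abs)
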
OpInfo('split',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
- skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::add'],),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
- skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),),
supports_forward_ad=True,
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
- skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
supports_forward_ad=True,
autodiff_nonfusible_nodes=['aten::mul'],),
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
- skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_jit',),),
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__ror__',
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
- skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_jit',),),
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rxor__',
dtypes=integral_types_and(torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
- skips=(SkipInfo('TestCommon', 'test_variant_consistency_jit',),),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_jit',),),
supports_autograd=False,
supports_forward_ad=True,),
OpInfo('__rmatmul__',
toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}),
'TestMathBits', 'test_conj_view')],
skips=(
- SkipInfo('TestJit', 'test_variant_consistency_jit',),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),
)),
OpInfo('__rmod__',
op=torch.Tensor.__rmod__,
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half, torch.bool),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
- skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),),
# Support autograd after torch.remainder(Tensor, Tensor) supports
# autograd of the second argument.
# https://github.com/pytorch/pytorch/pull/58476/files#r637167630
supports_out=False,
supports_forward_ad=True,
skips=(
- SkipInfo('TestJit', 'test_variant_consistency_jit',),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::pow'],),
OpInfo('__rsub__',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
sample_inputs_func=sample_inputs_rbinops,
supports_out=False,
- skips=(SkipInfo('TestJit', 'test_variant_consistency_jit',),),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',),),
assert_autodiffed=True,
autodiff_nonfusible_nodes=['aten::rsub'],),
OpInfo('rsub',
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
- SkipInfo('TestJit', 'test_variant_consistency_jit',
- dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
+ dtypes=[torch.cfloat, torch.cdouble]),
),
sample_inputs_func=partial(sample_inputs_rsub, variant='tensor'),),
OpInfo('rsub',
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/53797
# JIT doesn't understand complex literals
- SkipInfo('TestJit', 'test_variant_consistency_jit',
- dtypes=all_types_and_complex_and(torch.bfloat16, torch.half)),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
+ dtypes=all_types_and_complex_and(torch.bfloat16, torch.half)),),
assert_autodiffed=True,),
OpInfo('select',
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
safe_casts_outputs=True,
supports_forward_ad=True,
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cpu', dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=(IS_MACOS or IS_WINDOWS)),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=(IS_MACOS or IS_WINDOWS)),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=(IS_MACOS or IS_WINDOWS)),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cuda', dtypes=[torch.float64],
- active_if=TEST_WITH_ROCM),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cpu', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=(IS_MACOS or IS_WINDOWS)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=(IS_MACOS or IS_WINDOWS)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=(IS_MACOS or IS_WINDOWS)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cuda', dtypes=[torch.float64],
+ active_if=TEST_WITH_ROCM),
)),
UnaryUfuncInfo('tanh',
ref=np.tanh,
assert_jit_shape_analysis=True,
supports_forward_ad=True,
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=(IS_MACOS or IS_WINDOWS)),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=(IS_MACOS or IS_WINDOWS)),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=(IS_MACOS or IS_WINDOWS)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=(IS_MACOS or IS_WINDOWS)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=(IS_MACOS or IS_WINDOWS)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=(IS_MACOS or IS_WINDOWS)),
)),
OpInfo('tensor_split',
ref=np.array_split,
assert_autodiffed=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/48926#issuecomment-739734774
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cpu', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cpu', dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('nan_to_num',
ref=np.nan_to_num,
safe_casts_outputs=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/45690
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/49102#issuecomment-744604601
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ dtypes=[torch.bfloat16]),
)),
UnaryUfuncInfo('rsqrt',
ref=lambda x: np.reciprocal(np.sqrt(x)),
decorators=(precisionOverride({torch.bfloat16: 7e-2}),),
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/47358
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
- active_if=IS_MACOS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble],
+ active_if=IS_MACOS),
# Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- dtypes=[torch.bfloat16])),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.bfloat16])),
safe_casts_outputs=True,
handles_complex_extremals=False),
UnaryUfuncInfo('square',
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/issues/52549
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.cfloat, torch.cdouble]),
# >>> t = torch.tensor(complex(-0.01, float("inf")))
# >>> np.square(t.numpy())
# (-inf-infj)
# >>> t.square()
# tensor(-inf-infj)
# >>> t.cuda().square()
# tensor(inf+nanj, device='cuda:0')
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]),
# Reference: https://github.com/pytorch/pytorch/pull/52551#issuecomment-782596181
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.bfloat16]),
),),
OpInfo('lerp',
dtypes=floating_and_complex_types(),
skips=(
# this test does not work when the op is passed as a lambda
# there's a test `test_einsum` in `test_jit.py` that handles this case
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('svd',
op=torch.svd,
# def the_method(i0):
# return torch.polygamma(i0, 1)
# ~~~~~~~~~~~~~~~ <--- HERE
- SkipInfo('TestJit', 'test_variant_consistency_jit'),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
# A separate OpInfo entry for special.polygamma is needed to reorder the arguments
# for the alias. See the discussion here: https://github.com/pytorch/pytorch/pull/59691#discussion_r650261939
# def the_method(i0):
# return torch.polygamma(i0, 1)
# ~~~~~~~~~~~~~~~ <--- HERE
- SkipInfo('TestJit', 'test_variant_consistency_jit'),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),),
sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
- SkipInfo('TestGradients'),
- SkipInfo('TestJit'),
- SkipInfo('TestCommon'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard'),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal'),
),
sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1})),
UnaryUfuncInfo('polygamma',
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
- SkipInfo('TestGradients'),
- SkipInfo('TestJit'),
- SkipInfo('TestCommon'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- active_if=TEST_WITH_ROCM),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- active_if=TEST_WITH_ROCM),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ active_if=TEST_WITH_ROCM),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
- SkipInfo('TestGradients'),
- SkipInfo('TestJit'),
- SkipInfo('TestCommon'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- active_if=TEST_WITH_ROCM),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- active_if=TEST_WITH_ROCM),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ active_if=TEST_WITH_ROCM),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3})),
UnaryUfuncInfo('polygamma',
op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs),
sample_inputs_func=sample_inputs_polygamma,
skips=(
# Redundant tests
- SkipInfo('TestGradients'),
- SkipInfo('TestJit'),
- SkipInfo('TestCommon'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'),
# Mismatch: https://github.com/pytorch/pytorch/issues/55357
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal'),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- active_if=TEST_WITH_ROCM),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- active_if=TEST_WITH_ROCM),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ active_if=TEST_WITH_ROCM),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ active_if=TEST_WITH_ROCM),),
sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4})),
OpInfo('ravel',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
skips=(
# Because view_as does not have a function variant.
- SkipInfo('TestJit', 'test_variant_consistency_jit'),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),),
sample_inputs_func=sample_inputs_view_as_reshape_as,
),
OpInfo('pinverse',
supports_inplace_autograd=False,
supports_scripting=False,
op=torch.Tensor.__getitem__,
- skips=(SkipInfo('TestJit', 'test_variant_consistency_jit', device_type='cuda'),),
+ skips=(DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),),
assert_jit_shape_analysis=False, # TODO: support index.Tensor()
sample_inputs_func=sample_inputs_getitem,),
OpInfo('index_put',
test_neg_view=False,
sample_inputs_func=sample_inputs_index_put,
skips=(
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)),
OpInfo('sort',
dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_sort,
skips=(
# sort does not correctly warn when resizing out= inputs
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('put',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
assert_autodiffed=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('hstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('hypot',
dtypes=floating_types(),
skips=(
# JIT tests don't work with Tensor keyword arguments
# https://github.com/pytorch/pytorch/issues/58507
- SkipInfo('TestJit', 'test_variant_consistency_jit'),),),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),),),
OpInfo('cat',
ref=lambda input_seq, dim=0, **kwargs: np.concatenate(input_seq, axis=dim, **kwargs),
aliases=('concat',),
assert_autodiffed=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
# RuntimeError: Arguments for call are not valid.
# Expected a value of type 'List[Tensor]' for argument
# 'tensors' but instead found type 'Tensor (inferred)'.
- SkipInfo('TestJit', 'test_jit_alias_remapping'),)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_jit_alias_remapping'),)),
OpInfo('vstack',
aliases=('row_stack',),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
# RuntimeError: _fn() Expected a value of type
# 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.
- SkipInfo('TestJit', 'test_jit_alias_remapping'),)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_jit_alias_remapping'),)),
OpInfo('dstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
# TODO: see https://github.com/pytorch/pytorch/issues/64709
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)),
OpInfo('unfold',
op=lambda x, *args: x.unfold(*args),
check_batched_gradgrad=False,
skips=(
# torch.unfold does not exist so we get a RuntimeError.
- SkipInfo('TestJit', 'test_variant_consistency_jit',
- dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
+ dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
# Skip operator schema test because this is a functional and not an operator
- SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
),
sample_inputs_func=sample_inputs_unfold),
OpInfo('msort',
check_batched_gradgrad=False,
skips=(
# msort does not correctly warn when resizing out= inputs.
- SkipInfo('TestCommon', 'test_out',
- dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out',
+ dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_inputs_msort),
OpInfo('movedim',
supports_forward_ad=True,
skips=(
# torch.repeat does not exist so we get a RuntimeError.
- SkipInfo('TestJit', 'test_variant_consistency_jit',
- dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
+ dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16)),
),
sample_inputs_func=sample_repeat_tile),
OpInfo('squeeze',
supports_out=False,
skips=(
# JIT has issue when op is passed as lambda
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_fill_),
OpInfo('resize_',
supports_forward_ad=True,
skips=(
# JIT has issue when op is passed as lambda
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
sample_inputs_func=sample_inputs_zero_),
OpInfo('special.xlog1py',
safe_casts_outputs=True,
skips=(
# Lambda doesn't work in JIT test
- SkipInfo("TestJit", "test_variant_consistency_jit"),
+ DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit"),
),
sample_inputs_func=sample_inputs_zeta),
OpInfo('logsumexp',
skips=(
# Currently failing due to an INTERNAL_ASSERT_FAILED error.
# Reference: https://github.com/pytorch/pytorch/issues/56314
- SkipInfo("TestJit", "test_variant_consistency_jit", dtypes=[torch.float32]),
+ DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=[torch.float32]),
# Skip operator schema test because this is a functional and not an operator.
# Reference: https://github.com/pytorch/pytorch/issues/54574
- SkipInfo('TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'),
)
),
OpInfo('to_sparse',
check_batched_gradgrad=False,
skips=(
# TODO: FIXME: complex inputs requiring grad error in forward
- SkipInfo('TestCommon', 'test_dtypes'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# JIT has issue when op is passed as lambda
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
)
),
OpInfo('logcumsumexp',
backward_dtypesIfCUDA=floating_types_and(),
skips=(
# AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
- SkipInfo('TestCommon', 'test_out', dtypes=(torch.float32,), device_type='cuda'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', dtypes=(torch.float32,), device_type='cuda'),
),
sample_inputs_func=sample_inputs_logcumsumexp),
UnaryUfuncInfo('sigmoid',
torch.bfloat16: 1e-2}),),
skips=(
# TODO: FIXME: sigmoid fails on complex inputs that require grad
- SkipInfo('TestCommon', 'test_dtypes'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'),
# Reference: https://github.com/pytorch/pytorch/issues/56012
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cuda', dtypes=[torch.complex64]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cuda', dtypes=[torch.complex64]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cpu', dtypes=[torch.cfloat, torch.cdouble])),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cuda', dtypes=[torch.complex64]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cuda', dtypes=[torch.complex64]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cpu', dtypes=[torch.cfloat, torch.cdouble])),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
safe_casts_outputs=True,
dtypes=all_types_and(torch.bool, torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16),
skips=(
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- dtypes=[torch.bfloat16, torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.bfloat16, torch.float16]),
),
supports_inplace_autograd=False,
safe_casts_outputs=True,
domain=(-1, 1),
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ active_if=TEST_SCIPY and distutils.version.LooseVersion(scipy.__version__) < "1.4.0"),
)),
UnaryUfuncInfo('lgamma',
ref=reference_lgamma if TEST_SCIPY else _NOTHING,
supports_forward_ad=True,
skips=(
# Reference: https://github.com/pytorch/pytorch/pull/50140#discussion_r552615345
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- device_type='cpu', dtypes=[torch.bfloat16]),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- device_type='cpu', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ device_type='cpu', dtypes=[torch.bfloat16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ device_type='cpu', dtypes=[torch.bfloat16]),
# Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_extremal',
- dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_hard',
- dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
- SkipInfo('TestUnaryUfuncs', 'test_reference_numerics_normal',
- dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
+ dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard',
+ dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS),
),
safe_casts_outputs=True),
OpInfo(
supports_out=False,
skips=(
# this test does not work when the op is passed as a lambda
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
),
dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16)),
# `torch.norm` has multiple code paths depending on the value of `p`.
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
)
),
OpInfo('norm',
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
# RuntimeError:
# Arguments for call are not valid.
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64,)),
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":157,
# please report a bug to PyTorch.
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)
),
OpInfo('norm',
skips=(
# RuntimeError not raised :
# Expected RuntimeError when calling with input.device=cpu and out.device=cuda
- SkipInfo('TestCommon', 'test_out'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
# RuntimeError:
# Arguments for call are not valid.
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64,)),
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":157,
# please report a bug to PyTorch.
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
)
),
OpInfo('norm',
backward_dtypesIfCPU=floating_and_complex_types_and(torch.float16, torch.bfloat16),
skips=(
# the following 3 tests failed intermittently
- SkipInfo('TestJit', 'test_variant_consistency_jit',
- device_type='cpu', dtypes=(torch.complex64,)),
- SkipInfo('TestGradients', 'test_fn_grad',
- device_type='cpu', dtypes=(torch.complex128,)),
- SkipInfo('TestGradients', 'test_fn_gradgrad',
- device_type='cpu', dtypes=(torch.complex128,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit',
+ device_type='cpu', dtypes=(torch.complex64,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_grad',
+ device_type='cpu', dtypes=(torch.complex128,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestGradients', 'test_fn_gradgrad',
+ device_type='cpu', dtypes=(torch.complex128,)),
)
),
OpInfo('t',
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
supports_out=False,
skips=(
- SkipInfo(
- "TestJit",
- "test_variant_consistency_jit",
- dtypes=(torch.float32,),
- ),
+ DecorateInfo(unittest.skip("Skipped!"),
+ "TestJit",
+ "test_variant_consistency_jit",
+ dtypes=(torch.float32,),
+ ),
),
),
OpInfo(
# RuntimeError: aliasOp != torch::jit::getOperatorAliasMap().end()
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":159,
# please report a bug to PyTorch.
- SkipInfo('TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),
),
sample_inputs_func=sample_inputs_tensorinv,
supports_forward_ad=True,
backward_dtypesIfCPU=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
skips=(
- SkipInfo(
- "TestJit",
- "test_variant_consistency_jit",
- dtypes=(torch.float32,),
- ),
+ DecorateInfo(unittest.skip("Skipped!"),
+ "TestJit",
+ "test_variant_consistency_jit",
+ dtypes=(torch.float32,),
+ ),
),
),
OpInfo(
supports_gradgrad=False,
gradcheck_nondet_tol=1e-15,
skips=(
- SkipInfo(
- "TestJit",
- "test_variant_consistency_jit",
- dtypes=(torch.float32,),
- ),
+ DecorateInfo(unittest.skip("Skipped!"),
+ "TestJit",
+ "test_variant_consistency_jit",
+ dtypes=(torch.float32,),
+ ),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.all),
skips=(
# FIXME: does not support passing keepdim without dim
- SkipInfo('TestReductions', 'test_dim_default_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
- SkipInfo('TestReductions', 'test_dim_none'),
- SkipInfo('TestReductions', 'test_dim_none_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
- SkipInfo('TestReductions', 'test_result_dtype',
- dtypes=[torch.uint8]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.any),
skips=(
# FIXME: does not support passing keepdim without dim
- SkipInfo('TestReductions', 'test_dim_default_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: does not support dim=None
- SkipInfo('TestReductions', 'test_dim_none'),
- SkipInfo('TestReductions', 'test_dim_none_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: uint8 input returns uint8 instead of bool
- SkipInfo('TestReductions', 'test_result_dtype',
- dtypes=[torch.uint8]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.amax),
skips=(
# FIXME: sum reduces all dimensions when dim=[]
- SkipInfo('TestReductions', 'test_dim_empty'),
- SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.amin),
skips=(
# FIXME: sum reduces all dimensions when dim=[]
- SkipInfo('TestReductions', 'test_dim_empty'),
- SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.argmax, supports_keepdims=False),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
- SkipInfo('TestReductions', 'test_dim_default_keepdim'),
- SkipInfo('TestReductions', 'test_dim_none_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.argmin, supports_keepdims=False),
skips=(
# FIXME: keepdim parameter is ignored when dim=None
- SkipInfo('TestReductions', 'test_dim_default_keepdim'),
- SkipInfo('TestReductions', 'test_dim_none_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.count_nonzero),
skips=(
# FIXME: count_nonzero does not accept keepdim kwarg
- SkipInfo('TestReductions', 'test_dim_default_keepdim'),
- SkipInfo('TestReductions', 'test_dim_none_keepdim'),
- SkipInfo('TestReductions', 'test_dim_single_keepdim'),
- SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
- SkipInfo('TestReductions', 'test_dim_multi_keepdim'),
- SkipInfo('TestReductions', 'test_dim_multi_unsorted_keepdim'),
- SkipInfo('TestReductions', 'test_dim_offbounds_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'),
# FIXME: dim=[] reduces all dimensions
- SkipInfo('TestReductions', 'test_dim_empty'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.mean),
skips=(
# FIXME: mean does not support passing keepdim without passing dim
- SkipInfo('TestReductions', 'test_dim_default_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: mean reduces all dimensions when dim=[]
- SkipInfo('TestReductions', 'test_dim_empty'),
- SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: mean does not support passing None to dim
- SkipInfo('TestReductions', 'test_dim_none'),
- SkipInfo('TestReductions', 'test_dim_none_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: improve precision
- SkipInfo('TestReductions', 'test_noncontiguous_all',
- dtypes=[torch.float16]),
- SkipInfo('TestReductions', 'test_ref_small_input',
- dtypes=[torch.float16]),
- SkipInfo('TestReductions', 'test_ref_extremal_values',
- device_type='cuda', dtypes=[torch.complex64]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
+ dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
+ dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
+ device_type='cuda', dtypes=[torch.complex64]),
),
),
ReductionOpInfo(
skips=(
# RuntimeError: deepEquals(input.iValue, deepCopiedInput)INTERNAL ASSERT FAILED at
# "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":142, please report a bug to PyTorch.
- SkipInfo('TestJit', 'test_variant_consistency_jit'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
# FIXME: prod reduces all dimensions when dim=[]
- SkipInfo('TestReductions', 'test_dim_empty'),
- SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: improve precision
- SkipInfo('TestReductions', 'test_noncontiguous_all',
- dtypes=[torch.float16]),
- SkipInfo('TestReductions', 'test_ref_small_input',
- dtypes=[torch.float16]),
- SkipInfo('TestReductions', 'test_ref_duplicate_values',
- device_type='cuda', dtypes=[torch.float16]),
- SkipInfo('TestReductions', 'test_ref_extremal_values',
- device_type='cuda', dtypes=[torch.complex64]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
+ dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
+ dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
+ device_type='cuda', dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values',
+ device_type='cuda', dtypes=[torch.complex64]),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.prod),
skips=(
# FIXME: prod does not support passing keepdim without passing dim
- SkipInfo('TestReductions', 'test_dim_default_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: prod reduces all dimensions when dim=[]
- SkipInfo('TestReductions', 'test_dim_empty'),
- SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: prod does not support passing None to dim
- SkipInfo('TestReductions', 'test_dim_none'),
- SkipInfo('TestReductions', 'test_dim_none_keepdim'),
- # FIXME: improve precision, failing with nan != inf
- SkipInfo('TestReductions', 'test_ref_small_input',
- dtypes=[torch.float16, torch.complex64]),
- SkipInfo('TestReductions', 'test_ref_duplicate_values',
- dtypes=[torch.uint8, torch.float16, torch.complex64]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
+ dtypes=[torch.float16, torch.complex64]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
+ dtypes=[torch.uint8, torch.float16, torch.complex64]),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.sum),
skips=(
# FIXME: sum does not support passing keepdim without passing dim
- SkipInfo('TestReductions', 'test_dim_default_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: sum reduces all dimensions when dim=[]
- SkipInfo('TestReductions', 'test_dim_empty'),
- SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: sum does not support passing None to dim
- SkipInfo('TestReductions', 'test_dim_none'),
- SkipInfo('TestReductions', 'test_dim_none_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: improve precision
- SkipInfo('TestReductions', 'test_noncontiguous_all',
- dtypes=[torch.float16]),
- SkipInfo('TestReductions', 'test_ref_small_input',
- dtypes=[torch.float16]),
- SkipInfo('TestReductions', 'test_ref_duplicate_values',
- dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
+ dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
+ dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
+ dtypes=[torch.float16]),
),
),
ReductionOpInfo(
ref=reference_reduction_numpy(np.nansum),
skips=(
# FIXME: nansum does not support passing keepdim without passing dim
- SkipInfo('TestReductions', 'test_dim_default_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'),
# FIXME: nansum reduces all dimensions when dim=[]
- SkipInfo('TestReductions', 'test_dim_empty'),
- SkipInfo('TestReductions', 'test_dim_empty_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'),
# FIXME: nansum does not support passing None to dim
- SkipInfo('TestReductions', 'test_dim_none'),
- SkipInfo('TestReductions', 'test_dim_none_keepdim'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'),
# FIXME: improve precision
- SkipInfo('TestReductions', 'test_noncontiguous_all',
- dtypes=[torch.float16]),
- SkipInfo('TestReductions', 'test_ref_small_input',
- dtypes=[torch.float16]),
- SkipInfo('TestReductions', 'test_ref_duplicate_values',
- dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_noncontiguous_all',
+ dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input',
+ dtypes=[torch.float16]),
+ DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values',
+ dtypes=[torch.float16]),
),
),
OpInfo(
supports_out=False,
sample_inputs_func=sample_inputs_nll_loss,
skips=(
- SkipInfo(
- "TestJit",
- "test_variant_consistency_jit",
- dtypes=(torch.float32,),
- ),
+ DecorateInfo(unittest.skip("Skipped!"),
+ "TestJit",
+ "test_variant_consistency_jit",
+ dtypes=(torch.float32,),
+ ),
),
),
]