// NOTE: the resize warning for outputs with one or more elements is
// temporarily disabled here (see https://github.com/pytorch/pytorch/issues/64709).
// See https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
// for understanding why at::native::resize_output is not called directly.
- if (at::native::resize_output_check(result, result_size)) {
+ // if (at::native::resize_output_check(result, result_size)) {
+ // TODO: restore the above, see https://github.com/pytorch/pytorch/issues/64709
+
+ if (result.sizes() != result_size) {
result.resize_(result_size, first_tensor_mem_format);
}
// skip resizing if size of result is same as expected
// (the resize warning for non-empty outputs is temporarily disabled —
// see https://github.com/pytorch/pytorch/issues/64709)
- at::native::resize_output(result, result_sizes);
+ // at::native::resize_output(result, result_sizes);
+ // TODO: restore the above, see https://github.com/pytorch/pytorch/issues/64709
+
+ if (result.sizes() != result_sizes) {
+ result.resize_(result_sizes);
+ }
+
stack_serial_stub(kCPU, result, tensors, dim);
return true;
}
// NOTE: the resize warning for outputs with one or more elements is
// temporarily disabled here (see https://github.com/pytorch/pytorch/issues/64709).
// See https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
// for understanding why at::native::resize_output is not called directly.
- if (at::native::resize_output_check(out, size)) {
+ // if (at::native::resize_output_check(out, size)) {
+ // TODO: restore the above, see https://github.com/pytorch/pytorch/issues/64709
+
+ if (out.sizes() != size) {
out.resize_(size, memory_format);
}
OpInfo('stack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_stack,
- assert_autodiffed=True),
+ assert_autodiffed=True,
+ skips=(
+ # TODO: see https://github.com/pytorch/pytorch/issues/64709
+ SkipInfo('TestCommon', 'test_out'),
+ )),
OpInfo('hstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
- supports_forward_ad=True),
+ supports_forward_ad=True,
+ skips=(
+ # TODO: see https://github.com/pytorch/pytorch/issues/64709
+ SkipInfo('TestCommon', 'test_out'),
+ )),
OpInfo('hypot',
dtypes=floating_types(),
dtypesIfCPU=floating_types_and(torch.bfloat16),
supports_forward_ad=True,
assert_autodiffed=True,
skips=(
+ # TODO: see https://github.com/pytorch/pytorch/issues/64709
+ SkipInfo('TestCommon', 'test_out'),
# RuntimeError: Arguments for call not valid.
# Expected a value of type 'List[Tensor]' for argument
# 'tensors' but instead found type 'Tensor (inferred)'.
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
supports_forward_ad=True,
skips=(
+ # TODO: see https://github.com/pytorch/pytorch/issues/64709
+ SkipInfo('TestCommon', 'test_out'),
# RuntimeError: _fn() Expected a value of type
# 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'.
SkipInfo('TestJit', 'test_jit_alias_remapping'),)),
OpInfo('dstack',
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
sample_inputs_func=sample_inputs_hstack_dstack_vstack,
- supports_forward_ad=True),
+ supports_forward_ad=True,
+ skips=(
+ # TODO: see https://github.com/pytorch/pytorch/issues/64709
+ SkipInfo('TestCommon', 'test_out'),
+ )),
OpInfo('unfold',
op=lambda x, *args: x.unfold(*args),
dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),