dummy_out = func(*inputs)
grad_y = torch.randn_like(dummy_out, device=device, dtype=dtype, requires_grad=True)
+ # Issue #15353: for float (mkldnn) inputs, skip gradgradcheck due to
+ # precision issues; only verify double backward builds a graph.
+ if dtype == torch.float:
+ g, = torch.autograd.grad(dummy_out.sum(), x, create_graph=True)
+ return g.requires_grad
+
return gradgradcheck(func, inputs, (grad_y,))
def test_conv_double_backward(self):
for stride, padding, chan_in, chan_out, dilation in \
product([1, 2], [0, 1, 2], [2], [3], dilations):
for no_weight in (True, False):
- result = self.run_conv_double_back_test(kern, stride,
- padding, chan_in, chan_out,
- batch_size, inp_size, dilation,
- no_weight)
- self.assertTrue(result,
- "Conv double backward test failed with parameters:" +
- "\nkern: " + str(kern) +
- "\nstride: " + str(stride) +
- "\npadding: " + str(padding) +
- "\nchan_in: " + str(chan_in) +
- "\nchan_out: " + str(chan_out) +
- "\nbatch_size: " + str(batch_size) +
- "\ninp_size: " + str(inp_size) +
- "\ndilation: " + str(dilation))
+ for dtype in (torch.float, torch.double):
+ result = self.run_conv_double_back_test(kern, stride,
+ padding, chan_in, chan_out,
+ batch_size, inp_size, dilation,
+ no_weight, dtype=dtype)
+ self.assertTrue(result,
+ "Conv double backward test failed with parameters:" +
+ "\nkern: " + str(kern) +
+ "\nstride: " + str(stride) +
+ "\npadding: " + str(padding) +
+ "\nchan_in: " + str(chan_in) +
+ "\nchan_out: " + str(chan_out) +
+ "\nbatch_size: " + str(batch_size) +
+ "\ninp_size: " + str(inp_size) +
+ "\ndilation: " + str(dilation) +
+ "\ndtype: " + str(dtype))
def test_conv_double_backward_no_bias(self):
kern = 3
- name: mkldnn_convolution(Tensor self, Tensor weight, Tensor bias, IntList padding, IntList stride, IntList dilation, int64_t groups)
self, weight, bias: mkldnn_convolution_backward(self, grad, weight, padding, stride, dilation, groups, grad_input_mask)
+- name: mkldnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, IntList padding, IntList stride, IntList dilation, int64_t groups, std::array<bool,3> output_mask)
+ grad_output, self, weight: _convolution_double_backward(grads[0], grads[1], grads[2], grad_output, weight, self, stride, padding, dilation, false, std::vector<int64_t>(padding.size(), 0), groups, false, false, false, grad_input_mask)
+
# fft
- name: _fft_with_size(Tensor self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntList checked_signal_sizes, bool normalized, bool onesided, IntList output_sizes)
self: fft_backward(self, grad, signal_ndim, complex_input, complex_output, inverse, checked_signal_sizes, normalized, onesided, output_sizes)