net = nn.DataParallel(l)
out = net(i)
self.assertEqual(out.get_device(), 0)
- self.assertEqual(out.data, expected_out)
+ self.assertEqual(out.data, expected_out, dtype2prec[dtype])
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
n = nn.DataParallel(Net())
out = n(input=i)
self.assertEqual(out.get_device(), 0)
- self.assertEqual(out.data, expected_out)
+ self.assertEqual(out.data, expected_out, dtype2prec[dtype])
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
n = nn.DataParallel(Net())
out = n(input={'data': i, 'unused': []})
self.assertEqual(out.get_device(), 0)
- self.assertEqual(out.data, expected_out)
+ self.assertEqual(out.data, expected_out, dtype2prec[dtype])
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
n = nn.DataParallel(Net())
out = n(input={'data': i, 'unused': {}})
self.assertEqual(out.get_device(), 0)
- self.assertEqual(out.data, expected_out)
+ self.assertEqual(out.data, expected_out, dtype2prec[dtype])
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
n = nn.DataParallel(Net())
out = n(input={'data': i, 'unused': ()})
self.assertEqual(out.get_device(), 0)
- self.assertEqual(out.data, expected_out)
+ self.assertEqual(out.data, expected_out, dtype2prec[dtype])
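For context, the `dtype2prec[dtype]` argument added above is passed as the precision (tolerance) to `assertEqual`, so that runs repeated for half precision compare with a looser tolerance than float/double. A minimal sketch of the assumed mapping is below; the exact values in the test suite may differ.

import torch

# Assumed shape of the dtype -> tolerance mapping used as the precision
# argument in the assertions above (illustrative values only).
dtype2prec = {
    torch.float: 1e-5,
    torch.double: 1e-5,
    torch.half: 1e-2,
}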
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_device_args(self):