    queue.put(var.requires_grad)
+def integer_parameter_serialization(iparam):
+    # the test only checks that an integer Parameter survives being pickled to a
+    # spawned child process; touching it here is enough
+    iparam + 1
+
+
def autograd_sharing(queue, ready, master_modified, device, is_parameter):
    var = queue.get()
    ready.set()
        param = Parameter(torch.arange(1., 26, device='cuda').view(5, 5))
        self._test_autograd_sharing(param, mp.get_context('spawn'), is_parameter=True)
+    @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
+                     don't support multiprocessing with spawn start method")
+    def test_integer_parameter_serialization(self):
+        iparam = torch.nn.Parameter(torch.tensor(0, dtype=torch.int64), requires_grad=False)
+
+        ctx = mp.get_context('spawn')
+        p = ctx.Process(target=integer_parameter_serialization, args=(iparam,))
+        p.start()
+        p.join()
+        # a non-zero exit code means the child failed to unpickle or use the Parameter
+        self.assertEqual(0, p.exitcode)
+
    def test_empty_shared(self):
        t = torch.Tensor()
        t.share_memory_()
    storage_offset, size, stride, requires_grad = metadata
    t = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
    if cls == torch.nn.parameter.Parameter:
-        t = torch.nn.parameter.Parameter(t)
-        t.requires_grad = requires_grad
+        # we have to pass requires_grad into the constructor, rather than set it as
+        # an attribute afterwards, because the constructor enforces the check that
+        # integer tensors have requires_grad=False; constructing the Parameter with
+        # the default requires_grad=True would raise an error
+        t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
+    else:
+        t.requires_grad = requires_grad
    return t
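
Aside (an illustrative sketch, not part of the patch): the constructor check referenced in the comment above can be reproduced directly. Constructing a Parameter from an integer tensor with the default requires_grad=True raises, while passing requires_grad=False through the constructor succeeds, which is why the rebuild path forwards the flag instead of assigning it afterwards.

import torch

t = torch.tensor(0, dtype=torch.int64)

# Default requires_grad=True: Parameter construction from an integer tensor raises.
try:
    torch.nn.parameter.Parameter(t)
except RuntimeError as e:
    print(e)  # only tensors of floating point dtype can require gradients

# Forwarding requires_grad=False through the constructor works, matching the change above.
p = torch.nn.parameter.Parameter(t, requires_grad=False)
print(p.requires_grad)  # False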