y1.backward()
y2.backward()
mask = (S_dense == 0)
+ self.assertTrue(S.grad.is_coalesced())
self.assertEqual(S.grad.to_dense(), S_dense.grad.masked_fill_(mask, 0))
- if not self.is_uncoalesced:
- test_shape(7, 8, 9, 20)
+ test_shape(7, 8, 9, 20)
+
+ @skipIfRocm
+ def test_sparse_mm(self):
+ def test_shape(d1, d2, d3, nnz):
+ D = torch.randn(d2, d3, device=self.device).requires_grad_(True)
+ S = self._gen_sparse(2, nnz, [d1, d2])[0]
+ S_dense = S.to_dense().requires_grad_(True)
+ S.requires_grad_(True)
+ self.assertEqual(torch.sparse.mm(S, D), torch.mm(S_dense, D))
+ y1 = torch.sparse.mm(S, D).sum()
+ y2 = torch.mm(S_dense, D).sum()
+ y1.backward()
+ y2.backward()
+ mask = (S_dense == 0)
+ self.assertTrue(S.grad.is_coalesced())
+ self.assertEqual(S.grad.to_dense(), S_dense.grad.masked_fill_(mask, 0))
+
+ test_shape(7, 8, 9, 20)
@skipIfRocm
def test_dsmm(self):
__all__ = [
'addmm',
+ 'mm',
'sum',
]
def addmm(mat, mat1, mat2, beta=1, alpha=1):
r"""
This function does exact same thing as :func:`torch.addmm` in the forward,
- except that it supports backward for coalesced sparse matrix `mat1`.
+ except that it supports backward for sparse matrix :attr:`mat1`. :attr:`mat1`
+ needs to have `sparse_dim = 2`. Note that the gradient of :attr:`mat1` is a
+ coalesced sparse tensor.
Args:
mat (Tensor): a dense matrix to be added
- mat1 (Tensor): a sparse matrix to be multiplied
+ mat1 (SparseTensor): a sparse matrix to be multiplied
mat2 (Tensor): a dense matrix be multiplied
beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
return torch._sparse_addmm(mat, mat1, mat2, beta=beta, alpha=alpha)
+def mm(mat1, mat2):
+ r"""
+ Performs a matrix multiplication of the sparse matrix :attr:`mat1`
+ and dense matrix :attr:`mat2`. Similar to :func:`torch.mm`, if :attr:`mat1` is a
+ :math:`(n \times m)` tensor, :attr:`mat2` is a :math:`(m \times p)` tensor, the output will be a
+ :math:`(n \times p)` dense tensor. :attr:`mat1` needs to have `sparse_dim = 2`.
+ This function also supports backward for both matrices. Note that the gradient of
+ :attr:`mat1` is a coalesced sparse tensor.
+
+ Args:
+ mat1 (SparseTensor): the first sparse matrix to be multiplied
+ mat2 (Tensor): the second dense matrix to be multiplied
+
+ Example::
+
+ >>> a = torch.randn(2, 3).to_sparse().requires_grad_(True)
+ >>> a
+ tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
+ [0, 1, 2, 0, 1, 2]]),
+ values=tensor([ 1.5901, 0.0183, -0.6146, 1.8061, -0.0112, 0.6302]),
+ size=(2, 3), nnz=6, layout=torch.sparse_coo, requires_grad=True)
+
+ >>> b = torch.randn(3, 2, requires_grad=True)
+ >>> b
+ tensor([[-0.6479, 0.7874],
+ [-1.2056, 0.5641],
+ [-1.1716, -0.9923]], requires_grad=True)
+
+ >>> y = torch.sparse.mm(a, b)
+ >>> y
+ tensor([[-0.3323, 1.8723],
+ [-1.8951, 0.7904]], grad_fn=<SparseAddmmBackward>)
+ >>> y.sum().backward()
+ >>> a.grad
+ tensor(indices=tensor([[0, 0, 0, 1, 1, 1],
+ [0, 1, 2, 0, 1, 2]]),
+ values=tensor([ 0.1394, -0.6415, -2.1639, 0.1394, -0.6415, -2.1639]),
+ size=(2, 3), nnz=6, layout=torch.sparse_coo)
+ """
+ return torch._sparse_mm(mat1, mat2)
+
+
def sum(input, dim=None, dtype=None):
r"""
Returns the sum of each row of SparseTensor :attr:`input` in the given