[Sparse] Rename as_sparse to to_sparse, dense to to_dense. (dmlc#5170)
* as_sp_to_sp

* dense

* revert_mock

* test

* revert

Co-authored-by: Steve <[email protected]>
frozenbugs and Steve authored Jan 13, 2023
1 parent 5f5db2d commit 9334421
Showing 10 changed files with 41 additions and 41 deletions.
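For user code, the rename is mechanical: call sites of as_sparse and dense swap to to_sparse and to_dense. A minimal doctest-style sketch of the change (the import path is assumed from this repo's layout; the values are illustrative):

>>> import torch
>>> from dgl.sparse import diag  # import path assumed
>>> D = diag(torch.ones(3))
>>> sp = D.to_sparse()  # was: D.as_sparse()
>>> dn = D.to_dense()   # was: D.dense()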
6 changes: 3 additions & 3 deletions docs/source/api/python/dgl.sparse_v0.rst
@@ -89,7 +89,7 @@ Attributes and methods
SparseMatrix.csc
SparseMatrix.coalesce
SparseMatrix.has_duplicate
-SparseMatrix.dense
+SparseMatrix.to_dense
SparseMatrix.to
SparseMatrix.cuda
SparseMatrix.cpu
@@ -134,8 +134,8 @@ Attributes and methods
DiagMatrix.dtype
DiagMatrix.device
DiagMatrix.val
-DiagMatrix.as_sparse
-DiagMatrix.dense
+DiagMatrix.to_sparse
+DiagMatrix.to_dense
DiagMatrix.to
DiagMatrix.cuda
DiagMatrix.cpu
6 changes: 3 additions & 3 deletions python/dgl/sparse/diag_matrix.py
@@ -98,7 +98,7 @@ def device(self) -> torch.device:
"""
return self.val.device

-def as_sparse(self) -> SparseMatrix:
+def to_sparse(self) -> SparseMatrix:
"""Convert the diagonal matrix into a sparse matrix object
Returns
@@ -112,7 +112,7 @@ def as_sparse(self) -> SparseMatrix:
>>> import torch
>>> val = torch.ones(5)
>>> mat = diag(val)
->>> sp_mat = mat.as_sparse()
+>>> sp_mat = mat.to_sparse()
>>> print(sp_mat)
SparseMatrix(indices=tensor([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]),
@@ -122,7 +122,7 @@ def as_sparse(self) -> SparseMatrix:
row = col = torch.arange(len(self.val)).to(self.device)
return from_coo(row=row, col=col, val=self.val, shape=self.shape)

-def dense(self) -> torch.Tensor:
+def to_dense(self) -> torch.Tensor:
"""Return a dense representation of the matrix.
Returns
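Putting the two renamed converters together, a hedged sketch based on the docstring example above (a diagonal of ones, so the dense form is the identity; import path assumed):

>>> import torch
>>> from dgl.sparse import diag  # import path assumed
>>> mat = diag(torch.ones(5))
>>> sp_mat = mat.to_sparse()  # SparseMatrix with entries (i, i), i = 0..4
>>> mat.to_dense()            # dense torch.Tensor, equal to torch.eye(5) here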
2 changes: 1 addition & 1 deletion python/dgl/sparse/elementwise_op_diag.py
@@ -42,7 +42,7 @@ def diag_add(
"The shape of diagonal matrix D1 "
f"{D1.shape} and sparse matrix D2 {D2.shape} must match."
)
-D1 = D1.as_sparse()
+D1 = D1.to_sparse()
return D1 + D2
# Python falls back to D2.__radd__(D1) then TypeError when NotImplemented
# is returned.
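As the hunk shows, diag_add casts the diagonal operand with to_sparse() and defers to SparseMatrix addition, and the __radd__ fallback noted in the comment keeps the operation working in either operand order. A hedged sketch (diag and from_coo assumed importable from dgl.sparse):

>>> import torch
>>> from dgl.sparse import diag, from_coo  # import path assumed
>>> A = from_coo(torch.tensor([0, 1]), torch.tensor([1, 0]), torch.ones(2), shape=(2, 2))
>>> D = diag(torch.ones(2))
>>> (D + A).to_dense()  # diag_add path: D.to_sparse() + A
>>> (A + D).to_dense()  # same dense result in the other operand order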
2 changes: 1 addition & 1 deletion python/dgl/sparse/matmul.py
@@ -49,7 +49,7 @@ def spmm(A: Union[SparseMatrix, DiagMatrix], X: torch.Tensor) -> torch.Tensor:

# The input is a DiagMatrix. Cast it to SparseMatrix
if not isinstance(A, SparseMatrix):
-A = A.as_sparse()
+A = A.to_sparse()
return torch.ops.dgl_sparse.spmm(A.c_sparse_matrix, X)


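Per the signature above, spmm accepts either a SparseMatrix or a DiagMatrix, converting the latter with to_sparse() before dispatching to the C++ kernel. A hedged sketch; with an identity diagonal the product simply reproduces X:

>>> import torch
>>> from dgl.sparse import diag, spmm  # import path assumed
>>> X = torch.randn(3, 2)
>>> A = diag(torch.ones(3))  # DiagMatrix; cast to SparseMatrix inside spmm
>>> torch.allclose(spmm(A, X), X)
True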
2 changes: 1 addition & 1 deletion python/dgl/sparse/sparse_matrix.py
@@ -126,7 +126,7 @@ def csc(self) -> Tuple[torch.Tensor, ...]:
"""
return self.c_sparse_matrix.csc()

-def dense(self) -> torch.Tensor:
+def to_dense(self) -> torch.Tensor:
"""Return a dense representation of the matrix.
Returns
2 changes: 1 addition & 1 deletion tests/pytorch/sparse/test_diag.py
@@ -36,7 +36,7 @@ def test_diag(val_shape, mat_shape):
assert mat.device == val.device

# as_sparse
-sp_mat = mat.as_sparse()
+sp_mat = mat.to_sparse()
# shape
assert tuple(sp_mat.shape) == mat_shape
# nnz
34 changes: 17 additions & 17 deletions tests/pytorch/sparse/test_elementwise_op.py
@@ -25,9 +25,9 @@ def test_add_coo(val_shape):
val = torch.randn(row.shape + val_shape).to(ctx)
B = from_coo(row, col, val, shape=A.shape)

-sum1 = (A + B).dense()
-sum2 = add(A, B).dense()
-dense_sum = A.dense() + B.dense()
+sum1 = (A + B).to_dense()
+sum2 = add(A, B).to_dense()
+dense_sum = A.to_dense() + B.to_dense()

assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
@@ -51,9 +51,9 @@ def test_add_csr(val_shape):
val = torch.randn(indices.shape + val_shape).to(ctx)
B = from_csr(indptr, indices, val, shape=A.shape)

-sum1 = (A + B).dense()
-sum2 = add(A, B).dense()
-dense_sum = A.dense() + B.dense()
+sum1 = (A + B).to_dense()
+sum2 = add(A, B).to_dense()
+dense_sum = A.to_dense() + B.to_dense()

assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
@@ -77,9 +77,9 @@ def test_add_csc(val_shape):
val = torch.randn(indices.shape + val_shape).to(ctx)
B = from_csc(indptr, indices, val, shape=A.shape)

-sum1 = (A + B).dense()
-sum2 = add(A, B).dense()
-dense_sum = A.dense() + B.dense()
+sum1 = (A + B).to_dense()
+sum2 = add(A, B).to_dense()
+dense_sum = A.to_dense() + B.to_dense()

assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
@@ -98,9 +98,9 @@ def test_add_diag(val_shape):
D1 = diag(torch.randn(val_shape).to(ctx), shape=shape)
D2 = diag(torch.randn(val_shape).to(ctx), shape=shape)

-sum1 = (D1 + D2).dense()
-sum2 = add(D1, D2).dense()
-dense_sum = D1.dense() + D2.dense()
+sum1 = (D1 + D2).to_dense()
+sum2 = add(D1, D2).to_dense()
+dense_sum = D1.to_dense() + D2.to_dense()

assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
@@ -118,11 +118,11 @@ def test_add_sparse_diag(val_shape):
val_shape = (shape[0],) + val_shape
D = diag(torch.randn(val_shape).to(ctx), shape=shape)

-sum1 = (A + D).dense()
-sum2 = (D + A).dense()
-sum3 = add(A, D).dense()
-sum4 = add(D, A).dense()
-dense_sum = A.dense() + D.dense()
+sum1 = (A + D).to_dense()
+sum2 = (D + A).to_dense()
+sum3 = add(A, D).to_dense()
+sum4 = add(D, A).to_dense()
+dense_sum = A.to_dense() + D.to_dense()

assert torch.allclose(dense_sum, sum1)
assert torch.allclose(dense_sum, sum2)
24 changes: 12 additions & 12 deletions tests/pytorch/sparse/test_matmul.py
@@ -65,7 +65,7 @@ def test_bspmm(create_func, shape, nnz):
sparse_result.backward(grad)

XX = clone_detach_and_grad(X)
-torch_A = A.dense().clone().detach().requires_grad_()
+torch_A = A.to_dense().clone().detach().requires_grad_()
torch_result = torch_A.permute(2, 0, 1) @ XX.permute(2, 0, 1)

torch_result.backward(grad.permute(2, 0, 1))
@@ -103,14 +103,14 @@ def test_spspmm(create_func1, create_func2, shape_n_m, shape_k, nnz1, nnz2):
torch_A3.backward(torch_A3_grad)

with torch.no_grad():
-assert torch.allclose(A3.dense(), torch_A3.to_dense(), atol=1e-05)
+assert torch.allclose(A3.to_dense(), torch_A3.to_dense(), atol=1e-05)
assert torch.allclose(
-val_like(A1, A1.val.grad).dense(),
+val_like(A1, A1.val.grad).to_dense(),
torch_A1.grad.to_dense(),
atol=1e-05,
)
assert torch.allclose(
-val_like(A2, A2.val.grad).dense(),
+val_like(A2, A2.val.grad).to_dense(),
torch_A2.grad.to_dense(),
atol=1e-05,
)
@@ -161,20 +161,20 @@ def test_sparse_diag_mm(create_func, sparse_shape, nnz):
B.val.backward(grad)

torch_A = sparse_matrix_to_torch_sparse(A)
-torch_D = sparse_matrix_to_torch_sparse(D.as_sparse())
+torch_D = sparse_matrix_to_torch_sparse(D.to_sparse())
torch_B = torch.sparse.mm(torch_A, torch_D)
torch_B_grad = sparse_matrix_to_torch_sparse(B, grad)
torch_B.backward(torch_B_grad)

with torch.no_grad():
-assert torch.allclose(B.dense(), torch_B.to_dense(), atol=1e-05)
+assert torch.allclose(B.to_dense(), torch_B.to_dense(), atol=1e-05)
assert torch.allclose(
-val_like(A, A.val.grad).dense(),
+val_like(A, A.val.grad).to_dense(),
torch_A.grad.to_dense(),
atol=1e-05,
)
assert torch.allclose(
-diag(D.val.grad, D.shape).dense(),
+diag(D.val.grad, D.shape).to_dense(),
torch_D.grad.to_dense(),
atol=1e-05,
)
@@ -195,20 +195,20 @@ def test_diag_sparse_mm(create_func, sparse_shape, nnz):
B.val.backward(grad)

torch_A = sparse_matrix_to_torch_sparse(A)
-torch_D = sparse_matrix_to_torch_sparse(D.as_sparse())
+torch_D = sparse_matrix_to_torch_sparse(D.to_sparse())
torch_B = torch.sparse.mm(torch_D, torch_A)
torch_B_grad = sparse_matrix_to_torch_sparse(B, grad)
torch_B.backward(torch_B_grad)

with torch.no_grad():
-assert torch.allclose(B.dense(), torch_B.to_dense(), atol=1e-05)
+assert torch.allclose(B.to_dense(), torch_B.to_dense(), atol=1e-05)
assert torch.allclose(
-val_like(A, A.val.grad).dense(),
+val_like(A, A.val.grad).to_dense(),
torch_A.grad.to_dense(),
atol=1e-05,
)
assert torch.allclose(
-diag(D.val.grad, D.shape).dense(),
+diag(D.val.grad, D.shape).to_dense(),
torch_D.grad.to_dense(),
atol=1e-05,
)
2 changes: 1 addition & 1 deletion tests/pytorch/sparse/test_sparse_matrix.py
@@ -104,7 +104,7 @@ def test_dense(val_shape):
col = torch.tensor([2, 4, 3]).to(ctx)
val = torch.randn(val_shape).to(ctx)
A = from_coo(row, col, val)
-A_dense = A.dense()
+A_dense = A.to_dense()

shape = A.shape + val.shape[1:]
mat = torch.zeros(shape, device=ctx)
2 changes: 1 addition & 1 deletion tests/pytorch/sparse/utils.py
@@ -101,7 +101,7 @@ def rand_csc_uncoalesced(shape, nnz, dev):


def sparse_matrix_to_dense(A: SparseMatrix):
-dense = A.dense()
+dense = A.to_dense()
return clone_detach_and_grad(dense)


