Migrate old pytorch-lightning imports to modern lightning imports #9887

Open · wants to merge 2 commits into master
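
The change is mechanical: every `pytorch_lightning` import is rewritten to its equivalent in the unified `lightning` distribution. A minimal before/after sketch of the mapping this PR applies (all names are taken from the diffs below):

# Before: standalone pytorch-lightning distribution.
import pytorch_lightning as pl

strategy = pl.strategies.DDPStrategy(accelerator='gpu')
checkpoint = pl.callbacks.ModelCheckpoint(monitor='val_acc')
trainer = pl.Trainer(strategy=strategy, callbacks=[checkpoint])

# After: unified lightning distribution.
import lightning as L
from lightning.pytorch.callbacks import ModelCheckpoint
from lightning.pytorch.strategies import DDPStrategy

strategy = DDPStrategy(accelerator='gpu')
checkpoint = ModelCheckpoint(monitor='val_acc')
trainer = L.Trainer(strategy=strategy, callbacks=[checkpoint])
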
15 changes: 8 additions & 7 deletions examples/pytorch_lightning/gin.py
@@ -1,8 +1,10 @@
 import os.path as osp

-import pytorch_lightning as pl
 import torch
 import torch.nn.functional as F
+from lightning import LightningModule, Trainer
+from lightning.pytorch.callbacks import ModelCheckpoint
+from lightning.pytorch.strategies import DDPStrategy
 from torchmetrics import Accuracy

 import torch_geometric.transforms as T
@@ -11,7 +13,7 @@
 from torch_geometric.nn import GIN, MLP, global_add_pool


-class Model(pl.LightningModule):
+class Model(LightningModule):
     def __init__(self, in_channels: int, out_channels: int,
                  hidden_channels: int = 64, num_layers: int = 3,
                  dropout: float = 0.5):
@@ -71,11 +73,10 @@ def configure_optimizers(self):
 model = Model(dataset.num_node_features, dataset.num_classes)

 devices = torch.cuda.device_count()
-strategy = pl.strategies.DDPStrategy(accelerator='gpu')
-checkpoint = pl.callbacks.ModelCheckpoint(monitor='val_acc', save_top_k=1,
-                                          mode='max')
-trainer = pl.Trainer(strategy=strategy, devices=devices, max_epochs=50,
-                     log_every_n_steps=5, callbacks=[checkpoint])
+strategy = DDPStrategy(accelerator='gpu')
+checkpoint = ModelCheckpoint(monitor='val_acc', save_top_k=1, mode='max')
+trainer = Trainer(strategy=strategy, devices=devices, max_epochs=50,
+                  log_every_n_steps=5, callbacks=[checkpoint])

 trainer.fit(model, datamodule)
 trainer.test(ckpt_path='best', datamodule=datamodule)
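
Side note (not part of the diff): in lightning 2.x the top-level package re-exports the PyTorch trainer API, so `from lightning import LightningModule, Trainer` and the `lightning.pytorch` spellings above should resolve to the same objects. A quick sanity-check sketch, assuming `lightning>=2.0` is installed:

import lightning as L
from lightning import Trainer
from lightning.pytorch import Trainer as PLTrainer

# One class behind all three names in the unified distribution.
assert L.Trainer is Trainer is PLTrainer
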
15 changes: 8 additions & 7 deletions examples/pytorch_lightning/graph_sage.py
@@ -1,8 +1,10 @@
 import os.path as osp

-import pytorch_lightning as pl
 import torch
 import torch.nn.functional as F
+from lightning import LightningModule, Trainer
+from lightning.pytorch.callbacks import ModelCheckpoint
+from lightning.pytorch.strategies import SingleDeviceStrategy
 from torch.nn import BatchNorm1d
 from torchmetrics import Accuracy

@@ -11,7 +13,7 @@
 from torch_geometric.nn import GraphSAGE


-class Model(pl.LightningModule):
+class Model(LightningModule):
     def __init__(self, in_channels: int, out_channels: int,
                  hidden_channels: int = 256, num_layers: int = 2,
                  dropout: float = 0.5):
@@ -71,11 +73,10 @@ def configure_optimizers(self):

 model = Model(dataset.num_node_features, dataset.num_classes)

-strategy = pl.strategies.SingleDeviceStrategy('cuda:0')
-checkpoint = pl.callbacks.ModelCheckpoint(monitor='val_acc', save_top_k=1,
-                                          mode='max')
-trainer = pl.Trainer(strategy=strategy, devices=1, max_epochs=20,
-                     callbacks=[checkpoint])
+strategy = SingleDeviceStrategy('cuda:0')
+checkpoint = ModelCheckpoint(monitor='val_acc', save_top_k=1, mode='max')
+trainer = Trainer(strategy=strategy, devices=1, max_epochs=20,
+                  callbacks=[checkpoint])

 trainer.fit(model, datamodule)
 trainer.test(ckpt_path='best', datamodule=datamodule)
8 changes: 4 additions & 4 deletions examples/pytorch_lightning/relational_gnn.py
@@ -1,11 +1,11 @@
 import os.path as osp
 from typing import Dict, List, Tuple

-import pytorch_lightning as pl
 import torch
 import torch.nn.functional as F
-from pytorch_lightning import LightningModule, Trainer
-from pytorch_lightning.callbacks import ModelCheckpoint
+from lightning import LightningModule, Trainer
+from lightning.pytorch.callbacks import ModelCheckpoint
+from lightning.pytorch.strategies import SingleDeviceStrategy
 from torch import Tensor
 from torchmetrics import Accuracy

@@ -116,7 +116,7 @@ def main():
     batch = next(iter(loader))
     model.common_step(batch)

-    strategy = pl.strategies.SingleDeviceStrategy('cuda:0')
+    strategy = SingleDeviceStrategy('cuda:0')
     checkpoint = ModelCheckpoint(monitor='val_acc', save_top_k=1, mode='max')
     trainer = Trainer(strategy=strategy, devices=1, max_epochs=20,
                       log_every_n_steps=5, callbacks=[checkpoint])
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -43,7 +43,7 @@ dependencies=[
 [project.optional-dependencies]
 graphgym=[
     "protobuf<4.21",
-    "pytorch-lightning<2.3.0",
+    "lightning<2.3.0",
     "yacs",
 ]
 modelhub=[
@@ -187,7 +187,7 @@ filterwarnings = [
     # Filter `captum` warnings:
     "ignore:Setting backward hooks on ReLU activations:UserWarning",
     "ignore:.*did not already require gradients, required_grads has been set automatically:UserWarning",
-    # Filter `pytorch_lightning` warnings:
+    # Filter `lightning` warnings:
     "ignore:GPU available but not used:UserWarning",
 ]
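
With this change the `graphgym` extra pulls in `lightning` rather than `pytorch-lightning`, so environments that still only ship the old distribution no longer satisfy the optional dependency. A hedged sketch of a runtime guard for such environments (the fallback message is illustrative only):

import importlib.util

# Prefer the unified package; degrade gracefully when it is absent.
if importlib.util.find_spec('lightning') is not None:
    import lightning as L
    print(f'lightning {L.__version__} is available')
else:
    print('lightning is not installed; GraphGym support will be skipped')
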
53 changes: 28 additions & 25 deletions test/data/lightning/test_datamodule.py
@@ -26,7 +26,7 @@
 )

 try:
-    from pytorch_lightning import LightningModule
+    from lightning import LightningModule
 except ImportError:
     LightningModule = torch.nn.Module

@@ -78,11 +78,12 @@ def configure_optimizers(self):
 @onlyCUDA
 @onlyOnline
 @onlyFullTest
-@withPackage('pytorch_lightning>=2.0.0', 'torchmetrics>=0.11.0')
+@withPackage('lightning>=2.0.0', 'torchmetrics>=0.11.0')
 @pytest.mark.parametrize('strategy_type', [None, 'ddp'])
 def test_lightning_dataset(get_dataset, strategy_type):
-    import pytorch_lightning as pl
-    from pytorch_lightning.utilities import rank_zero_only
+    import lightning as L
+    from lightning.fabric.utilities import rank_zero_only
+    from lightning.pytorch.strategies import DDPStrategy, SingleDeviceStrategy

     @contextmanager
     def expect_rank_zero_user_warning(match: str):
@@ -100,14 +101,14 @@ def expect_rank_zero_user_warning(match: str):

     devices = 1 if strategy_type is None else torch.cuda.device_count()
     if strategy_type == 'ddp':
-        strategy = pl.strategies.DDPStrategy(accelerator='gpu')
+        strategy = DDPStrategy(accelerator='gpu')
     else:
-        strategy = pl.strategies.SingleDeviceStrategy(device='cuda:0')
+        strategy = SingleDeviceStrategy(device='cuda:0')

     model = LinearGraphModule(dataset.num_features, 64, dataset.num_classes)

-    trainer = pl.Trainer(strategy=strategy, devices=devices, max_epochs=1,
-                         log_every_n_steps=1)
+    trainer = L.Trainer(strategy=strategy, devices=devices, max_epochs=1,
+                        log_every_n_steps=1)
     with pytest.warns(UserWarning, match="'shuffle=True' option is ignored"):
         datamodule = LightningDataset(train_dataset, val_dataset, test_dataset,
                                       pred_dataset, batch_size=5,
@@ -129,8 +130,8 @@ def expect_rank_zero_user_warning(match: str):

     # Test with `val_dataset=None` and `test_dataset=None`:
     if strategy_type is None:
-        trainer = pl.Trainer(strategy=strategy, devices=devices, max_epochs=1,
-                             log_every_n_steps=1)
+        trainer = L.Trainer(strategy=strategy, devices=devices, max_epochs=1,
+                            log_every_n_steps=1)

         datamodule = LightningDataset(train_dataset, batch_size=5)
         assert str(datamodule) == ('LightningDataset(train_dataset=MUTAG(50), '
@@ -191,11 +192,12 @@ def configure_optimizers(self):
 @onlyOnline
 @onlyFullTest
 @onlyNeighborSampler
-@withPackage('pytorch_lightning>=2.0.0', 'torchmetrics>=0.11.0', 'scipy')
+@withPackage('lightning>=2.0.0', 'torchmetrics>=0.11.0', 'scipy')
 @pytest.mark.parametrize('loader', ['full', 'neighbor'])
 @pytest.mark.parametrize('strategy_type', [None, 'ddp'])
 def test_lightning_node_data(get_dataset, strategy_type, loader):
-    import pytorch_lightning as pl
+    import lightning as L
+    from lightning.pytorch.strategies import DDPStrategy, SingleDeviceStrategy

     dataset = get_dataset(name='Cora')
     data = dataset[0]
@@ -210,9 +212,9 @@ def test_lightning_node_data(get_dataset, strategy_type, loader):
     devices = torch.cuda.device_count()

     if strategy_type == 'ddp':
-        strategy = pl.strategies.DDPStrategy(accelerator='gpu')
+        strategy = DDPStrategy(accelerator='gpu')
     else:
-        strategy = pl.strategies.SingleDeviceStrategy(device='cuda:0')
+        strategy = SingleDeviceStrategy(device='cuda:0')

     if loader == 'full':  # Set reasonable defaults for full-batch training:
         batch_size = 1
@@ -225,8 +227,8 @@
         kwargs['num_neighbors'] = [5]
         kwargs_repr += 'num_neighbors=[5], '

-    trainer = pl.Trainer(strategy=strategy, devices=devices, max_epochs=5,
-                         log_every_n_steps=1)
+    trainer = L.Trainer(strategy=strategy, devices=devices, max_epochs=5,
+                        log_every_n_steps=1)
     datamodule = LightningNodeData(data, loader=loader, batch_size=batch_size,
                                    num_workers=num_workers, **kwargs)

@@ -298,20 +300,21 @@ def preserve_context():
 @onlyCUDA
 @onlyFullTest
 @onlyNeighborSampler
-@withPackage('pytorch_lightning>=2.0.0', 'torchmetrics>=0.11.0')
+@withPackage('lightning>=2.0.0', 'torchmetrics>=0.11.0')
 def test_lightning_hetero_node_data(preserve_context, get_dataset):
-    import pytorch_lightning as pl
+    import lightning as L
+    from lightning.pytorch.strategies import DDPStrategy

     data = get_dataset(name='hetero')[0]

     model = LinearHeteroNodeModule(data['paper'].num_features,
                                    int(data['paper'].y.max()) + 1)

     devices = torch.cuda.device_count()
-    strategy = pl.strategies.DDPStrategy(accelerator='gpu')
+    strategy = DDPStrategy(accelerator='gpu')

-    trainer = pl.Trainer(strategy=strategy, devices=devices, max_epochs=5,
-                         log_every_n_steps=1)
+    trainer = L.Trainer(strategy=strategy, devices=devices, max_epochs=5,
+                        log_every_n_steps=1)
     datamodule = LightningNodeData(data, loader='neighbor', num_neighbors=[5],
                                    batch_size=32, num_workers=3)
     assert isinstance(datamodule.graph_sampler, NeighborSampler)
@@ -323,7 +326,7 @@ def test_lightning_hetero_node_data(preserve_context, get_dataset):
     assert trainer.test_loop._data_source.is_defined()


-@withPackage('pytorch_lightning')
+@withPackage('lightning')
 def test_lightning_data_custom_sampler():
     class DummySampler(BaseSampler):
         def sample_from_edges(self, *args, **kwargs):
@@ -347,7 +350,7 @@ def sample_from_nodes(self, *args, **kwargs):
 @onlyCUDA
 @onlyFullTest
 @onlyNeighborSampler
-@withPackage('pytorch_lightning')
+@withPackage('lightning')
 def test_lightning_hetero_link_data():
     torch.manual_seed(12345)

@@ -407,7 +410,7 @@ def test_lightning_hetero_link_data():


 @onlyNeighborSampler
-@withPackage('pytorch_lightning')
+@withPackage('lightning')
 def test_lightning_hetero_link_data_custom_store():
     torch.manual_seed(12345)

@@ -445,7 +448,7 @@ def test_lightning_hetero_link_data_custom_store():

 @onlyOnline
 @onlyNeighborSampler
-@withPackage('pytorch_lightning', 'scipy')
+@withPackage('lightning', 'scipy')
 def test_eval_loader_kwargs(get_dataset):
     data = get_dataset(name='Cora')[0]
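
One non-mechanical detail in this file: `rank_zero_only` now comes from `lightning.fabric.utilities` instead of `pytorch_lightning.utilities`. A minimal usage sketch of that helper, assuming `lightning>=2.0` (the function name `log_once` is illustrative):

from lightning.fabric.utilities import rank_zero_only

@rank_zero_only
def log_once(msg: str) -> None:
    # In a distributed run only global rank 0 executes this body;
    # the decorator turns the call into a no-op on every other rank.
    print(msg)

log_once('initialized')
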
16 changes: 8 additions & 8 deletions test/graphgym/test_graphgym.py
@@ -42,7 +42,7 @@ def trivial_metric(true, pred, task_type):


 @onlyOnline
-@withPackage('yacs', 'pytorch_lightning')
+@withPackage('yacs', 'lightning')
 @pytest.mark.parametrize('auto_resume', [True, False])
 @pytest.mark.parametrize('skip_train_eval', [True, False])
 @pytest.mark.parametrize('use_trivial_metric', [True, False])
@@ -110,9 +110,9 @@ def test_run_single_graphgym(tmp_path, capfd, auto_resume, skip_train_eval,


 @onlyOnline
-@withPackage('yacs', 'pytorch_lightning')
+@withPackage('yacs', 'lightning')
 def test_graphgym_module(tmp_path):
-    import pytorch_lightning as pl
+    import lightning as L

     load_cfg(cfg, args)
     cfg.out_dir = osp.join(tmp_path, 'out_dir')
@@ -131,7 +131,7 @@ def test_graphgym_module(tmp_path):
     assert len(loaders) == 3

     model = create_model()
-    assert isinstance(model, pl.LightningModule)
+    assert isinstance(model, L.LightningModule)

     optimizer, scheduler = model.configure_optimizers()
     assert isinstance(optimizer[0], torch.optim.Adam)
@@ -172,11 +172,11 @@ def destroy_process_group():

 @onlyOnline
 @onlyLinux
-@withPackage('yacs', 'pytorch_lightning')
+@withPackage('yacs', 'lightning')
 def test_train(destroy_process_group, tmp_path, capfd):
     warnings.filterwarnings('ignore', ".*does not have many workers.*")

-    import pytorch_lightning as pl
+    import lightning as L

     load_cfg(cfg, args)
     cfg.out_dir = osp.join(tmp_path, 'out_dir')
@@ -195,8 +195,8 @@ def test_train(destroy_process_group, tmp_path, capfd):
     model = create_model()
     cfg.params = params_count(model)
     logger = LoggerCallback()
-    trainer = pl.Trainer(max_epochs=1, max_steps=4, callbacks=logger,
-                         log_every_n_steps=1)
+    trainer = L.Trainer(max_epochs=1, max_steps=4, callbacks=logger,
+                        log_every_n_steps=1)
     train_loader, val_loader = loaders[0], loaders[1]
     trainer.fit(model, train_loader, val_loader)

2 changes: 1 addition & 1 deletion test/graphgym/test_logger.py
@@ -4,7 +4,7 @@
 from torch_geometric.testing import withPackage


-@withPackage('yacs', 'pytorch_lightning')
+@withPackage('yacs', 'lightning')
 def test_logger_callback():
     loaders = create_loader()
     assert len(loaders) == 3
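
All decorator changes in the test files follow the same pattern: `withPackage` from `torch_geometric.testing` skips a test unless the named distribution (optionally with a version specifier) is importable. A minimal sketch mirroring the updated gates above:

from torch_geometric.testing import withPackage

@withPackage('lightning>=2.0.0')  # skipped unless lightning 2.x is installed
def test_requires_lightning():
    import lightning as L
    assert hasattr(L, 'Trainer')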