first working transformer training!!

1 parent 91c97c8 · commit 48449fc
Showing 3 changed files with 174 additions and 42 deletions.
@@ -0,0 +1,146 @@
import torch
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch import nn, optim
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
import torchmetrics
from torchmetrics import Metric

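# Custom accuracy metric: accumulates correct predictions and total sample
# counts across batches (and across processes via dist_reduce_fx="sum").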
class MyAccuracy(Metric):
    def __init__(self):
        super().__init__()
        self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
        self.add_state("correct", default=torch.tensor(0), dist_reduce_fx="sum")

    def update(self, preds, target):
        preds = torch.argmax(preds, dim=1)
        assert preds.shape == target.shape
        self.correct += torch.sum(preds == target)
        self.total += target.numel()

    def compute(self):
        return self.correct.float() / self.total.float()

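# A two-layer fully connected classifier wrapped in a LightningModule, with
# loss, metrics, and the train/val/test/predict steps defined in one place.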
class NN(pl.LightningModule):
    def __init__(self, input_size, num_classes):
        super().__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.fc2 = nn.Linear(50, num_classes)
        self.loss_fn = nn.CrossEntropyLoss()
        self.accuracy = torchmetrics.Accuracy(task="multiclass", num_classes=num_classes)
        self.my_accuracy = MyAccuracy()
        self.f1_score = torchmetrics.F1Score(task="multiclass", num_classes=num_classes)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

    def training_step(self, batch, batch_idx):
        loss, scores, y = self._common_step(batch, batch_idx)
        accuracy = self.my_accuracy(scores, y)
        f1_score = self.f1_score(scores, y)
        self.log_dict(
            {"train_loss": loss, "train_accuracy": accuracy, "train_f1_score": f1_score},
            on_step=False,
            on_epoch=True,
            prog_bar=True,
        )
        return {"loss": loss, "scores": scores, "y": y}

    def validation_step(self, batch, batch_idx):
        loss, scores, y = self._common_step(batch, batch_idx)
        self.log("val_loss", loss)
        return loss

    def test_step(self, batch, batch_idx):
        loss, scores, y = self._common_step(batch, batch_idx)
        self.log("test_loss", loss)
        return loss

    def _common_step(self, batch, batch_idx):
        x, y = batch
        # Flatten 28x28 images to 784-dim vectors for the fully connected layers.
        x = x.reshape(x.size(0), -1)
        scores = self.forward(x)
        loss = self.loss_fn(scores, y)
        return loss, scores, y

    def predict_step(self, batch, batch_idx):
        x, y = batch
        x = x.reshape(x.size(0), -1)
        scores = self.forward(x)
        preds = torch.argmax(scores, dim=1)
        return preds

    def configure_optimizers(self):
        return optim.Adam(self.parameters(), lr=0.001)

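# LightningDataModule that downloads MNIST once and serves the
# train/validation/test dataloaders.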
class MnistDataModule(pl.LightningDataModule):
    def __init__(self, data_dir, batch_size, num_workers):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers

    def prepare_data(self):
        # Download once; this hook runs on a single process.
        datasets.MNIST(self.data_dir, train=True, download=True)
        datasets.MNIST(self.data_dir, train=False, download=True)

    def setup(self, stage):
        entire_dataset = datasets.MNIST(
            root=self.data_dir,
            train=True,
            transform=transforms.ToTensor(),
            download=False,
        )
        # Split the 60,000 training images into 50,000 train / 10,000 validation.
        self.train_ds, self.val_ds = random_split(entire_dataset, [50000, 10000])
        self.test_ds = datasets.MNIST(
            root=self.data_dir,
            train=False,
            transform=transforms.ToTensor(),
            download=False,
        )

    def train_dataloader(self):
        return DataLoader(
            self.train_ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=True,
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=False,
        )

    def test_dataloader(self):
        return DataLoader(
            self.test_ds,
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            shuffle=False,
        )

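# Script section: instantiate the model and datamodule, then train and evaluate.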
# Set device to cuda if a GPU is available, otherwise run on the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters
input_size = 784  # 28x28 MNIST images, flattened
num_classes = 10
learning_rate = 0.001  # matches the lr used in configure_optimizers
batch_size = 64
num_epochs = 3

model = NN(input_size=input_size, num_classes=num_classes)
dm = MnistDataModule(data_dir="dataset/", batch_size=batch_size, num_workers=4)
# Assumes a CUDA GPU; precision=16 enables mixed-precision training.
trainer = pl.Trainer(
    accelerator="gpu",
    devices=1,
    min_epochs=1,
    max_epochs=num_epochs,
    precision=16,
)
trainer.fit(model, dm)
trainer.validate(model, dm)
trainer.test(model, dm)
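
A minimal sketch, not from the commit, of how the hand-rolled MyAccuracy can be sanity-checked against torchmetrics' built-in multiclass accuracy; the batch size and class count here are illustrative, and MyAccuracy is assumed to be importable from the file above.

import torch
from torchmetrics import Accuracy

# Hypothetical smoke test on random logits and labels.
logits = torch.randn(8, 10)           # a batch of 8 samples, 10 classes
targets = torch.randint(0, 10, (8,))  # random ground-truth labels

my_acc = MyAccuracy()
ref_acc = Accuracy(task="multiclass", num_classes=10)

# Both metrics should agree batch-for-batch; calling a Metric updates its
# state and returns the value for this batch.
assert torch.isclose(my_acc(logits, targets), ref_acc(logits, targets))
print(my_acc.compute())  # same value, computed from the accumulated state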