diff --git a/dataloader/dataloader.py b/dataloader/dataloader.py index 7686a1a..911a4c0 100644 --- a/dataloader/dataloader.py +++ b/dataloader/dataloader.py @@ -68,8 +68,8 @@ def __getitem__(self, index): label = skimage.io.imread(self.target_images[index]) if self.transform: image = self.transform(image) - image= torch.from_numpy(image) - label= torch.from_numpy(label) + image = torch.from_numpy(image) + label = torch.from_numpy(label) if torch.cuda.is_available(): return {"Image": image.cuda(), "Label": label.cuda()} else: diff --git a/hyperparams/hyperparams.py b/hyperparams/hyperparams.py index 654bd88..0c4de84 100644 --- a/hyperparams/hyperparams.py +++ b/hyperparams/hyperparams.py @@ -1,6 +1,5 @@ class hyperparameters: def __init__(self, **kwargs): - self.hyperparameters= dict() + self.hyperparameters = dict() for key, value in kwargs.items(): self.hyperparameters.update({key: value}) - diff --git a/loss/diceloss.py b/loss/diceloss.py index f4b3a5a..4d1c6c8 100644 --- a/loss/diceloss.py +++ b/loss/diceloss.py @@ -1,6 +1,8 @@ import torch import torch.nn as nn import torch.functional as F + + class Loss: @classmethod def dice_loss(self, pred, target, smooth=1.): @@ -8,7 +10,7 @@ def dice_loss(self, pred, target, smooth=1.): target = target.contiguous() intersection = (pred * target).sum(dim=2).sum(dim=2) loss = (1 - ((2. 
* intersection + smooth) / - (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth))) + (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth))) return loss.mean() def calc_loss(self, pred, target, metrics, bce_weight=0.5): diff --git a/test/test_iter.py b/test/test_iter.py index 285693a..9d1441b 100644 --- a/test/test_iter.py +++ b/test/test_iter.py @@ -15,5 +15,7 @@ train_percentage=params.hyperparameters['train_percentage']) Train = TrainSet(Images.train_set, extension="tif", transform=None) Test = TestSet(Images.test_set, extension="tif", transform=None) -for i in range(len(Train)): - print(Train[i].size()) \ No newline at end of file +TrainLoder = DataLoader(Train, batch_size=4) +for i, data in enumerate(TrainLoder, 0): + inputs, labels = data["Image"], data["Label"] + print(inputs.size()) diff --git a/train.py b/train.py index b930dbe..82cf11f 100644 --- a/train.py +++ b/train.py @@ -9,12 +9,12 @@ import torch from torchvision import transforms -transforms_compose= transforms.Compose([transforms.ToTensor()]) +transforms_compose = transforms.Compose([transforms.ToTensor()]) params = hyperparameters(train_percentage=0.6, batch_size=1, epoch=4) if torch.cuda.is_available(): - net= UNeT(n_class=1).cuda() + net = UNeT(n_class=1).cuda() else: - net= net= UNeT(n_class=1) + net = UNeT(n_class=1) IMAGE_DIR = "/Users/madhav/DataSets/AerialImageDataset/train/images/*.tif" ANNOTATIONS_DIR = "/Users/madhav/DataSets/AerialImageDataset/train/gt/*.tif" @@ -23,7 +23,10 @@ Annotations=ANNOTATIONS_DIR, train_percentage=0.7) loss_val = Loss() -Train = TrainSet(Images.train_set, extension="tif", transform=transforms_compose) +Train = TrainSet( + Images.train_set, + extension="tif", + transform=transforms_compose) Test = TestSet(Images.test_set, extension="tif", transform=None) TrainLoder = DataLoader( Train, @@ -39,7 +42,7 @@ metrics = defaultdict() running_loss = 0.0 for i, data in enumerate(TrainLoder, 0): - inputs, labels= data["Image"], 
data["Label"] + inputs, labels = data["Image"], data["Label"] #inputs, labels= inputs.permute(0, 3, 1, 2), labels.permute(0, 3, 1, 2) optimizer.zero_grad() outputs = net(inputs) @@ -48,7 +51,7 @@ optimizer.step() running_loss += loss.item() print('[%d, %5d] loss: %.3f' % - (epoch + 1, i + 1, running_loss / 2000)) + (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0. print('Finished Training')