-
Notifications
You must be signed in to change notification settings - Fork 74
/
Copy pathdataset.py
61 lines (50 loc) · 1.81 KB
/
dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
from torch.utils.data import Dataset
from PIL import Image
import torch
import config
import torchvision.transforms as transforms
import numpy as np
from sklearn import preprocessing
def readTxt(file_path):
    """Read an index file and return one list of tokens per line.

    Each line of the file is expected to hold whitespace-separated image
    paths; the returned value is a list of those per-line token lists
    (blank lines yield empty lists, matching the original behavior).

    Args:
        file_path: path to the text index file.

    Returns:
        list[list[str]]: tokens of every line, in file order.
    """
    img_list = []
    # The `with` block closes the file automatically; no explicit close needed.
    with open(file_path, 'r') as file_to_read:
        # Iterating the file object yields lines directly — clearer than a
        # manual readline()/break loop and identical in behavior.
        for line in file_to_read:
            img_list.append(line.strip().split())
    return img_list
class RoadSequenceDataset(Dataset):
    """Single-frame dataset: one input image plus its label mask per item.

    Each line of the index file lists six paths; item 4 (the last frame)
    is used as the input and item 5 as the label.
    NOTE(review): the `transforms` parameter shadows the module-level
    torchvision `transforms` import — kept for interface compatibility.
    """

    def __init__(self, file_path, transforms):
        # readTxt yields one list of whitespace-separated paths per line.
        self.transforms = transforms
        self.img_list = readTxt(file_path)
        self.dataset_size = len(self.img_list)

    def __len__(self):
        return self.dataset_size

    def __getitem__(self, idx):
        paths = self.img_list[idx]
        # Column 4 is the input frame, column 5 its ground-truth mask.
        data = self.transforms(Image.open(paths[4]))
        label = torch.squeeze(self.transforms(Image.open(paths[5])))
        return {'data': data, 'label': label}
class RoadSequenceDatasetList(Dataset):
    """Sequence dataset: a stack of 5 consecutive frames plus one label mask.

    Each line of the index file lists six paths; items 0-4 are the frame
    sequence (stacked along a new leading dimension) and item 5 the label.
    NOTE(review): the `transforms` parameter shadows the module-level
    torchvision `transforms` import — kept for interface compatibility.
    """

    def __init__(self, file_path, transforms):
        # readTxt yields one list of whitespace-separated paths per line.
        self.transforms = transforms
        self.img_list = readTxt(file_path)
        self.dataset_size = len(self.img_list)

    def __len__(self):
        return self.dataset_size

    def __getitem__(self, idx):
        paths = self.img_list[idx]
        # Transform each of the 5 frames and stack into (5, C, H, W).
        frames = [torch.unsqueeze(self.transforms(Image.open(p)), dim=0)
                  for p in paths[:5]]
        data = torch.cat(frames, 0)
        # Column 5 is the ground-truth mask for the sequence.
        label = torch.squeeze(self.transforms(Image.open(paths[5])))
        return {'data': data, 'label': label}