-
Notifications
You must be signed in to change notification settings - Fork 1
/
dataloader.py
145 lines (124 loc) · 4.14 KB
/
dataloader.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import os
import random
import torch
from PIL import Image
from torch.utils import data
from torchvision import transforms as T
from torchvision.datasets import ImageFolder
class Loader(data.Dataset):
    """
    Dataset class for the CelebA dataset.

    Parses a CelebA-style attribute annotation file, deterministically
    shuffles the entries (fixed seed 135) and partitions them into a
    test split (first 2000 shuffled entries) and a train split (the
    rest).  Serves (transformed image, attribute label tensor) pairs.
    """
    def __init__(self, image_dir, attr_path, selected_attrs, attr_dims,
                 transform, mode):
        """
        Initialize and preprocess the CelebA dataset.

        Args:
            image_dir: directory containing the image files.
            attr_path: path to the CelebA attribute list file
                (line 0: count, line 1: attribute names, rest: entries).
            selected_attrs: attribute names to keep as binary labels.
            attr_dims: per-attribute-group dimensionalities (stored but
                not currently used by this class).
            transform: callable applied to each PIL image in __getitem__.
            mode: "train" selects the train split; any other value
                selects the test split.
        """
        self.image_dir = image_dir
        self.attr_path = attr_path
        self.selected_attrs = selected_attrs
        self.attr_dims = attr_dims
        self.transform = transform
        self.mode = mode
        self.train_dataset = []   # list of [filename, label] pairs
        self.test_dataset = []    # list of [filename, label] pairs
        self.attr2idx = {}        # attribute name -> column index
        self.idx2attr = {}        # column index -> attribute name
        self.preprocess()
        if mode == "train":
            self.num_images = len(self.train_dataset)
        else:
            self.num_images = len(self.test_dataset)
    def preprocess(self):
        """
        Parse the CelebA attribute file and build the train/test splits.
        """
        # Context manager closes the file promptly; the original left an
        # open handle to the garbage collector.
        with open(self.attr_path, "r") as attr_file:
            lines = [line.rstrip() for line in attr_file]
        all_attr_names = lines[1].split()
        for i, attr_name in enumerate(all_attr_names):
            self.attr2idx[attr_name] = i
            self.idx2attr[i] = attr_name
        lines = lines[2:]
        # Fixed seed keeps the train/test partition reproducible across runs.
        random.seed(135)
        random.shuffle(lines)
        cnt = 0
        for i, line in enumerate(lines):
            split = line.split()
            filename = split[0]
            values = split[1:]
            label = []
            for attr_name in self.selected_attrs:
                idx = self.attr2idx[attr_name]
                # CelebA encodes presence as "1" and absence as "-1".
                label.append(values[idx] == "1")
            cnt += 1
            # First 2000 shuffled entries form the held-out test split.
            if cnt <= 2000:
                self.test_dataset.append([filename, label])
            else:
                self.train_dataset.append([filename, label])
        print("Build dataset with attributes:", " ".join(self.selected_attrs))
        print("Train dataset: {} images.".format(len(self.train_dataset)))
        print("Test dataset: {} images.".format(len(self.test_dataset)))
        print("\n")
    def __getitem__(self, index):
        """
        Return one image and its corresponding attribute label.

        Args:
            index: position within the split selected by ``self.mode``.

        Returns:
            (transformed image, FloatTensor of 0/1 attribute flags).
        """
        dataset = self.train_dataset if self.mode == "train" else self.test_dataset
        filename, label = dataset[index]
        # NOTE(review): image is not .convert("RGB")-ed here — assumes the
        # dataset files are already RGB; confirm if non-RGB inputs appear.
        image = Image.open(os.path.join(self.image_dir, filename))
        return self.transform(image), torch.FloatTensor(label)
    def __len__(self):
        """
        Return the number of images in the active split.
        """
        return self.num_images
def get_loader(
    image_dir="./data/celeba/images",
    attr_path="./data/celeba/list_attr_celeba.txt",
    selected_attrs=None,
    attr_dims=None,
    crop_size=178,
    image_size=128,
    batch_size=8,
    mode="train",
    num_workers=0,
):
    """
    Build a DataLoader over the CelebA dataset.

    Args:
        image_dir: directory containing the image files.
        attr_path: path to the CelebA attribute list file.
        selected_attrs: attribute names to use as labels; defaults to
            the three hair-color attributes.
        attr_dims: per-attribute-group dimensionalities; defaults to
            [3, 1, 1] matching the default attributes.
        crop_size: side length of the center crop applied to each image.
        image_size: side length each crop is resized to.
        batch_size: number of samples per batch.
        mode: "train" enables augmentation and shuffling; any other
            value yields the test split in order.
        num_workers: worker processes for the DataLoader (default 0,
            matching the previous single-process behavior).

    Returns:
        torch.utils.data.DataLoader yielding (image, label) batches.
    """
    if selected_attrs is None:
        selected_attrs = ["Black_Hair", "Blond_Hair", "Brown_Hair"]
    if attr_dims is None:
        attr_dims = [3, 1, 1]
    transform = []
    if mode == "train":
        # Horizontal flips only augment the training split.
        transform.append(T.RandomHorizontalFlip())
    transform.append(T.CenterCrop(crop_size))
    transform.append(T.Resize(image_size))
    transform.append(T.ToTensor())
    # Maps pixel values from [0, 1] to [-1, 1].
    transform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
    transform = T.Compose(transform)
    dataset = Loader(image_dir, attr_path, selected_attrs, attr_dims,
                     transform, mode)
    data_loader = data.DataLoader(dataset=dataset,
                                  batch_size=batch_size,
                                  shuffle=(mode == "train"),
                                  num_workers=num_workers)
    return data_loader
if __name__ == "__main__":
    # Smoke test: build the default loader, pull one batch, and print
    # the images and labels along with their tensor shapes.
    loader = get_loader()
    images, attributes = next(iter(loader))
    print(images)
    print(images.size())
    print(attributes)
    print(attributes.size())