-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathcreate_dataset.py
73 lines (56 loc) · 2.38 KB
/
create_dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
# Dataset-preparation script: precomputes identity latents for the
# VGGFace2-HQ dataset by driving the dataset/loader pipeline once.
import numpy as np
from options.train_options import TrainOptions
import torch
from torch import nn
from utils.IDExtract import IDExtractor
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from models.models import create_model
from data.VGGface2HQ import VGGFace2HQDataset, ComposedLoader
import time
import matplotlib.pyplot as plt
import warnings
from utils.loss import IDLoss
from utils.utils import mkdirs
import os
# Silence library warnings so the progress log below stays readable.
warnings.filterwarnings("ignore")
# Enable autograd anomaly detection (slower, but surfaces NaN/inf sources).
torch.autograd.set_detect_anomaly(True)
if __name__ == '__main__':
    # Parse training options; opt.dataroot points at the VGGFace2-HQ root
    # containing 'train/' and 'test/' splits, each with an 'images/' tree.
    opt = TrainOptions().parse()
    dataroot = opt.dataroot
    train_path = os.path.join(dataroot, 'train')
    test_path = os.path.join(dataroot, 'test')

    # Mirror every identity folder found under <split>/images into
    # <split>/latent-ID so the dataset can store extracted ID latents.
    # (Same loop for both splits — previously duplicated, and it clobbered
    # the 'dataroot' variable by reusing the name 'path'.)
    for split_path in (train_path, test_path):
        for identity in os.listdir(os.path.join(split_path, 'images')):
            mkdirs(os.path.join(split_path, 'latent-ID', identity))

    transformer_to_tensor = transforms.ToTensor()

    if opt.fp16:
        # Imported for parity with the training scripts when fp16 is on;
        # not referenced below in this script.
        from torch.cuda.amp import autocast

    print("Generating data loaders...")
    # Iterating these datasets is what triggers latent generation as a
    # side effect; shuffle=False keeps a deterministic on-disk layout.
    train_data = VGGFace2HQDataset(opt, isTrain=True, transform=transformer_to_tensor, is_same_ID=True, auto_same_ID=False, random_in_ID=False)
    train_loader = DataLoader(dataset=train_data, batch_size=opt.batchSize, shuffle=False, num_workers=3)
    test_data = VGGFace2HQDataset(opt, isTrain=False, transform=transformer_to_tensor, is_same_ID=True, auto_same_ID=False, random_in_ID=False)
    test_loader = DataLoader(dataset=test_data, batch_size=opt.batchSize, shuffle=False, num_workers=3)
    print("Dataloaders ready.")

    torch.nn.Module.dump_patches = True

    # Train-set generation is currently disabled; re-enable by mirroring
    # the test-set loop below over train_loader/train_data.
    # train_size = len(train_data)
    # print('Creating latents for train set...')
    # for _ in train_loader:
    #     ...

    test_size = len(test_data)
    print('Creating latents for test...')
    done = 0
    for _ in test_loader:
        # The final batch may hold fewer than opt.batchSize samples, so
        # clamp the counter — otherwise the report can exceed 100%.
        done = min(done + opt.batchSize, test_size)
        print('Generated {}/{} ({:.3%})'.format(done, test_size, 1. * done / test_size))
    print('Test set successfully constructed.')