-
Notifications
You must be signed in to change notification settings - Fork 6
/
train-wa.py
140 lines (103 loc) · 4.68 KB
/
train-wa.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
import json
import time
import argparse
import shutil
import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from core.data import get_data_info
from core.data import load_data
from core.data import SEMISUP_DATASETS
from core.utils import format_time
from core.utils import Logger
from core.utils import parser_train
from core.utils import Trainer
from core.utils import seed
from gowal21uncovering.utils import WATrainer
# --- Setup: CLI arguments, output directory, logging ---
parser = parser_train()
parser.add_argument('--tau', type=float, default=0.995, help='Weight averaging decay.')
args = parser.parse_args()
assert args.data in SEMISUP_DATASETS, f'Only data in {SEMISUP_DATASETS} is supported!'

DATA_DIR = os.path.join(args.data_dir, args.data)
LOG_DIR = os.path.join(args.log_dir, args.desc)
WEIGHTS = os.path.join(LOG_DIR, 'weights-best.pt')

# Each run starts from a clean log directory; any previous run under the
# same description is wiped.
if os.path.exists(LOG_DIR):
    shutil.rmtree(LOG_DIR)
os.makedirs(LOG_DIR)
logger = Logger(os.path.join(LOG_DIR, 'log-train.log'))

# Persist the full argument set alongside the logs for reproducibility.
with open(os.path.join(LOG_DIR, 'args.txt'), 'w') as f:
    json.dump(args.__dict__, f, indent=4)

info = get_data_info(DATA_DIR)
BATCH_SIZE = args.batch_size
BATCH_SIZE_VALIDATION = args.batch_size_validation
NUM_ADV_EPOCHS = args.num_adv_epochs

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.log(f'Using device: {device}')
if args.debug:
    # Debug mode: a single epoch is enough to smoke-test the pipeline.
    NUM_ADV_EPOCHS = 1

# To speed up training
torch.backends.cudnn.benchmark = True

# --- Load data (seeded for reproducibility) ---
seed(args.seed)
train_dataset, test_dataset, eval_dataset, train_dataloader, test_dataloader, eval_dataloader = load_data(
    DATA_DIR, BATCH_SIZE, BATCH_SIZE_VALIDATION, use_augmentation=args.augment, shuffle_train=True,
    aux_data_filename=args.aux_data_filename, unsup_fraction=args.unsup_fraction, validation=True
)
# Only the dataloaders are needed from here on; drop the dataset handles.
del train_dataset, test_dataset, eval_dataset
# --- Adversarial Training: trainer construction and pre-training eval ---
seed(args.seed)
if args.tau:
    # Non-zero tau selects the weight-averaging trainer (EMA decay = tau).
    # Use the logger (not print) so the message is captured in the log file,
    # consistent with every other status message in this script.
    logger.log('Using WA.')
    trainer = WATrainer(info, args)
else:
    trainer = Trainer(info, args)
last_lr = args.lr

if NUM_ADV_EPOCHS > 0:
    logger.log('\n\n')
    metrics = pd.DataFrame()
    # Format spec fixed: original '{:2f}' (missing dot) printed six decimal
    # places; '{:.2f}' matches the accuracy formatting used everywhere else.
    logger.log('Standard Accuracy-\tTest: {:.2f}%.'.format(trainer.eval(test_dataloader)*100))
    # old_score tracks [test clean acc, eval adversarial acc] of the best
    # checkpoint so far.
    old_score = [0.0, 0.0]
    logger.log('RST Adversarial training for {} epochs'.format(NUM_ADV_EPOCHS))
    # Use NUM_ADV_EPOCHS (which --debug overrides to 1) rather than
    # args.num_adv_epochs, so the LR schedule matches the epochs actually run.
    trainer.init_optimizer(NUM_ADV_EPOCHS)
test_adv_acc = 0.0
# Main adversarial-training loop: one trainer.train() pass per epoch,
# periodic adversarial evaluation, best-checkpoint selection on eval-set
# adversarial accuracy, and per-epoch metrics appended to stats_adv.csv.
for epoch in range(1, NUM_ADV_EPOCHS+1):
    start = time.time()
    logger.log('======= Epoch {} ======='.format(epoch))

    if args.scheduler:
        last_lr = trainer.scheduler.get_last_lr()[0]

    res = trainer.train(train_dataloader, epoch=epoch, adversarial=True)
    test_acc = trainer.eval(test_dataloader)

    logger.log('Loss: {:.4f}.\tLR: {:.4f}'.format(res['loss'], last_lr))
    if 'clean_acc' in res:
        logger.log('Standard Accuracy-\tTrain: {:.2f}%.\tTest: {:.2f}%.'.format(res['clean_acc']*100, test_acc*100))
    else:
        logger.log('Standard Accuracy-\tTest: {:.2f}%.'.format(test_acc*100))
    epoch_metrics = {'train_'+k: v for k, v in res.items()}
    epoch_metrics.update({'epoch': epoch, 'lr': last_lr, 'test_clean_acc': test_acc, 'test_adversarial_acc': ''})

    # Adversarial evaluation on the test set is expensive, so it only runs
    # every adv_eval_freq epochs (and always on the final epoch).
    if epoch % args.adv_eval_freq == 0 or epoch == NUM_ADV_EPOCHS:
        test_adv_acc = trainer.eval(test_dataloader, adversarial=True)
        logger.log('Adversarial Accuracy-\tTrain: {:.2f}%.\tTest: {:.2f}%.'.format(res['adversarial_acc']*100,
                                                                                   test_adv_acc*100))
        epoch_metrics.update({'test_adversarial_acc': test_adv_acc})
    else:
        logger.log('Adversarial Accuracy-\tTrain: {:.2f}%.'.format(res['adversarial_acc']*100))

    # Model selection: keep the checkpoint with the best adversarial accuracy
    # on the held-out eval split; the latest weights are always saved too.
    eval_adv_acc = trainer.eval(eval_dataloader, adversarial=True)
    logger.log('Adversarial Accuracy-\tEval: {:.2f}%.'.format(eval_adv_acc*100))
    epoch_metrics['eval_adversarial_acc'] = eval_adv_acc
    if eval_adv_acc >= old_score[1]:
        old_score[0], old_score[1] = test_acc, eval_adv_acc
        trainer.save_model(WEIGHTS)
    trainer.save_model(os.path.join(LOG_DIR, 'weights-last.pt'))

    logger.log('Time taken: {}'.format(format_time(time.time()-start)))
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported way to accumulate per-epoch rows.
    metrics = pd.concat([metrics, pd.DataFrame(epoch_metrics, index=[0])], ignore_index=True)
    metrics.to_csv(os.path.join(LOG_DIR, 'stats_adv.csv'), index=False)
# --- Record final metrics ---
# Guarded on NUM_ADV_EPOCHS: when no adversarial epochs ran, `res` and
# `old_score` were never assigned (both are created inside the training
# branch above), so the original unguarded references raised a NameError.
logger.log('\nTraining completed.')
if NUM_ADV_EPOCHS > 0:
    train_acc = res['clean_acc'] if 'clean_acc' in res else trainer.eval(train_dataloader)
    # old_score holds [test clean acc, eval adversarial acc] of the best
    # checkpoint selected during training.
    logger.log('Standard Accuracy-\tTrain: {:.2f}%.\tTest: {:.2f}%.'.format(train_acc*100, old_score[0]*100))
    logger.log('Adversarial Accuracy-\tTrain: {:.2f}%.\tEval: {:.2f}%.'.format(res['adversarial_acc']*100, old_score[1]*100))
else:
    # No training happened; report the (untrained) model's train accuracy only.
    logger.log('Standard Accuracy-\tTrain: {:.2f}%.'.format(trainer.eval(train_dataloader)*100))
logger.log('Script Completed.')