train.py
# coding=utf-8
import os
import shutil
import time

import torch
import torch.nn as nn
from torch.optim.lr_scheduler import MultiStepLR

from config import num_classes, model_name, model_path, lr_milestones, lr_decay_rate, input_size, \
    root, end_epoch, save_interval, init_lr, batch_size, CUDA_VISIBLE_DEVICES, weight_decay, \
    proposalN, set, channels
from utils.train_model import train
from utils.read_dataset import read_dataset
from utils.auto_laod_resume import auto_load_resume  # module name is spelled "laod" in the repo
from networks.model import MainNet

# Select which GPUs are visible to this process before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = CUDA_VISIBLE_DEVICES
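
# The names imported from config above imply a module shaped roughly like the
# sketch below. The values shown are illustrative assumptions only, not the
# repository's actual settings:
#
#     # config.py (hypothetical example values)
#     set = 'CUB'                    # dataset identifier passed to read_dataset
#     root = './datasets/CUB_200_2011'
#     model_name = 'MainNet'         # checkpoint subdirectory name
#     model_path = './checkpoints'   # checkpoint root directory
#     num_classes = 200
#     channels = 2048                # backbone feature channels
#     input_size = 448
#     batch_size = 6
#     proposalN = 6                  # number of part proposals
#     init_lr = 0.001
#     lr_milestones = [60, 100]
#     lr_decay_rate = 0.1
#     weight_decay = 1e-4
#     end_epoch = 200
#     save_interval = 1
#     CUDA_VISIBLE_DEVICES = '0'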

def main():
    # Load the training and test data.
    trainloader, testloader = read_dataset(input_size, batch_size, root, set)

    # Build the model.
    model = MainNet(proposalN=proposalN, num_classes=num_classes, channels=channels)

    # Loss and trainable parameters.
    criterion = nn.CrossEntropyLoss()
    parameters = model.parameters()

    # Resume from a checkpoint if one exists; otherwise start from scratch.
    # auto_load_resume is assumed to restore the latest checkpoint under
    # save_path into the model and return the epoch to resume from together
    # with the learning rate recorded at that epoch.
    save_path = os.path.join(model_path, model_name)
    if os.path.exists(save_path):
        start_epoch, lr = auto_load_resume(model, save_path, status='train')
        assert start_epoch < end_epoch, 'checkpoint is already at or past end_epoch'
    else:
        os.makedirs(save_path)
        start_epoch = 0
        lr = init_lr

    # Define the optimizer and learning-rate schedule.
    optimizer = torch.optim.SGD(parameters, lr=lr, momentum=0.9, weight_decay=weight_decay)

    model = model.cuda()  # move the model to the GPU

    scheduler = MultiStepLR(optimizer, milestones=lr_milestones, gamma=lr_decay_rate)

    # Save a timestamped copy of config.py alongside the checkpoints so each
    # run records the settings it was trained with.
    time_str = time.strftime("%Y%m%d-%H%M%S")
    shutil.copy('./config.py', os.path.join(save_path, "{}config.py".format(time_str)))

    # Run the training loop (defined in utils/train_model.py).
    train(model=model,
          trainloader=trainloader,
          testloader=testloader,
          criterion=criterion,
          optimizer=optimizer,
          scheduler=scheduler,
          save_path=save_path,
          start_epoch=start_epoch,
          end_epoch=end_epoch,
          save_interval=save_interval)


if __name__ == '__main__':
    main()
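
# Typical invocation, assuming config.py has been edited for your dataset:
#
#     python train.py
#
# Note that GPU selection comes from CUDA_VISIBLE_DEVICES in config.py
# (applied via os.environ above), not from the shell environment.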