# main_TJU.py (forked from wang-fujin/PINN4SOH)
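
# Overview (inferred from the code below; the upstream repository is named
# PINN4SOH): this script trains a physics-informed neural network (PINN) for
# battery state-of-health estimation on the TJU dataset.
# Entry points: main() repeats the same-batch experiment 10 times on each of
# the three TJU batches; small_sample() repeats it with only a few training cells.
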
from dataloader.dataloader import TJUdata
from Model.Model import PINN
import argparse
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # make only GPU 0 visible to CUDA


def get_args():
    parser = argparse.ArgumentParser('Hyper Parameters for TJU dataset')
    parser.add_argument('--data', type=str, default='TJU', help='XJTU, HUST, MIT, TJU')
    parser.add_argument('--in_same_batch', type=bool, default=True,
                        help='whether the train and test sets come from the same batch')
    parser.add_argument('--train_batch', type=int, default=-1, choices=[-1, 0, 1, 2],
                        help='if -1, read all data and randomly split train and test sets; '
                             'otherwise, read the data of the corresponding batch')
    parser.add_argument('--test_batch', type=int, default=-1, choices=[-1, 0, 1, 2],
                        help='if -1, read all data and randomly split train and test sets; '
                             'otherwise, read the data of the corresponding batch')
    parser.add_argument('--batch', type=int, default=0, choices=[0, 1, 2],
                        help='which TJU batch to use when train and test sets are in the same batch')
    parser.add_argument('--batch_size', type=int, default=512, help='batch size')
    parser.add_argument('--normalization_method', type=str, default='min-max', help='min-max, z-score')
    # scheduler related
    parser.add_argument('--epochs', type=int, default=200, help='epochs')
    parser.add_argument('--early_stop', type=int, default=20, help='early stop patience')
    parser.add_argument('--warmup_epochs', type=int, default=30, help='warmup epochs')
    parser.add_argument('--warmup_lr', type=float, default=0.002, help='warmup lr')
    parser.add_argument('--lr', type=float, default=0.01, help='base lr')
    parser.add_argument('--final_lr', type=float, default=0.0002, help='final lr')
    parser.add_argument('--lr_F', type=float, default=0.001, help='lr of F')
    # model related
    parser.add_argument('--F_layers_num', type=int, default=3, help='the number of layers of F')
    parser.add_argument('--F_hidden_dim', type=int, default=60, help='the hidden dim of F')
    # loss related
    parser.add_argument('--alpha', type=float, default=1, help='loss = l_data + alpha * l_PDE + beta * l_physics')
    parser.add_argument('--beta', type=float, default=0.05, help='loss = l_data + alpha * l_PDE + beta * l_physics')
    parser.add_argument('--log_dir', type=str, default='logging.txt', help='log file name; if None, do not save')
    parser.add_argument('--save_folder', type=str, default='results/TJU results', help='save folder')

    args = parser.parse_args()
    return args
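
# Example invocation (illustrative; flag names and defaults come from the parser
# above, so any option left out keeps its default value):
#   python main_TJU.py --batch 0 --batch_size 512 --normalization_method min-max --alpha 1 --beta 0.05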


def load_TJU_data(args, small_sample=None):
    root = 'data/TJU data'
    data = TJUdata(root=root, args=args)
    train_list = []
    test_list = []
    # Cells whose 1-based index ends in one of the two digits below (per batch)
    # form the test set; all other cells form the training set.
    mod = [(5, 9), (4, 8), (5, 9)]
    if args.in_same_batch:
        batchs = os.listdir(root)  # note: os.listdir gives no guaranteed order; args.batch indexes this listing
        batch = batchs[args.batch]
        batch_root = os.path.join(root, batch)
        files = os.listdir(batch_root)
        for i, f in enumerate(files):
            # check whether the units digit of the 1-based index is one of the test digits
            cell_id = i + 1
            if cell_id % 10 == mod[args.batch][0] or cell_id % 10 == mod[args.batch][1]:
                test_list.append(os.path.join(batch_root, f))
                print(f)
            else:
                train_list.append(os.path.join(batch_root, f))
        if small_sample is not None:
            train_list = train_list[:small_sample]
        train_loader = data.read_all(specific_path_list=train_list)
        test_loader = data.read_all(specific_path_list=test_list)
        dataloader = {'train': train_loader['train_2'],
                      'valid': train_loader['valid_2'],
                      'test': test_loader['test_3']}
    else:
        # If the training and test sets are not in the same batch,
        # one batch is used for training and the other for testing.
        train_loader = data.read_one_batch(args.train_batch)
        test_loader = data.read_one_batch(args.test_batch)
        dataloader = {'train': train_loader['train_2'],
                      'valid': train_loader['valid_2'],
                      'test': test_loader['test_3']}
    return dataloader
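
# Assumed on-disk layout (illustrative; the actual file format is defined by
# dataloader.TJUdata, which is not shown here). load_TJU_data() expects one
# sub-folder per batch under 'data/TJU data', each containing one file per cell:
#   data/TJU data/
#       <batch 0 folder>/cell files ...
#       <batch 1 folder>/cell files ...
#       <batch 2 folder>/cell files ...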


def main():
    args = get_args()
    batchs = [0, 1, 2]
    for batch in batchs:
        # train and test within the same TJU batch, repeated 10 times per batch
        setattr(args, 'in_same_batch', True)
        setattr(args, 'batch', batch)
        for e in range(10):
            if args.in_same_batch:
                save_folder = f'results/TJU results/{batch}-{batch}/Experiment{e + 1}'
            else:
                save_folder = f'results/TJU results/{args.train_batch}-{args.test_batch}/Experiment{e + 1}'
            setattr(args, 'save_folder', save_folder)
            os.makedirs(save_folder, exist_ok=True)

            dataloader = load_TJU_data(args)
            pinn = PINN(args)
            pinn.Train(trainloader=dataloader['train'], validloader=dataloader['valid'], testloader=dataloader['test'])
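
# Folder structure produced by main() (derived from the save_folder paths above):
#   results/TJU results/0-0/Experiment1 ... Experiment10
#   results/TJU results/1-1/Experiment1 ... Experiment10
#   results/TJU results/2-2/Experiment1 ... Experiment10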


def small_sample():
    # Repeat the same-batch experiment on batch 2 while keeping only the first
    # num_battery training cells (a small-sample setting).
    args = get_args()
    num_battery = 2
    batch = 2
    for e in range(10):
        setattr(args, 'in_same_batch', True)
        setattr(args, 'batch', batch)
        setattr(args, 'save_folder', f'results/TJU results (small sample {num_battery})/{batch}-{batch}/Experiment{e + 1}')
        setattr(args, 'batch_size', 128)
        os.makedirs(args.save_folder, exist_ok=True)

        dataloader = load_TJU_data(args, small_sample=num_battery)
        pinn = PINN(args)
        pinn.Train(trainloader=dataloader['train'], validloader=dataloader['valid'], testloader=dataloader['test'])
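
# small_sample() writes to 'results/TJU results (small sample 2)/2-2/Experiment1 ... Experiment10',
# using only the first num_battery (= 2) training cells of batch 2 and a reduced batch size of 128.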


if __name__ == '__main__':
    main()
    # small_sample()  # alternative entry point for the small-sample experiments