# main.py
import os
import sys

import joblib
import numpy as np
import torch
from joblib import Parallel, delayed
from torch.utils.data import DataLoader, TensorDataset

from lib.data import Mimic2
from lib.model import LR
from lib.train import Trainer, prepareData
# all regularizers are imported so they can be selected by name from the
# command line (see the eval-based dispatch in __main__)
from lib.regularization import eye_loss, wridge, wlasso, lasso, \
    enet, owl, ridge, eye_loss2, eye_loss_height
from lib.utility import model_auc, calcAP, sweepS1, sparsity, bootstrap

def trainData(name, data, regularization=eye_loss, alpha=0.01, n_epochs=300,
              learning_rate=1e-3, batch_size=4000, r=None, test=False):
    '''
    Return validation AUC, average precision, score1, and sparsity.
    If test is True, train on train+val combined and report test performance.
    '''
    m = data
    if test:
        name = 'test' + name
        xtrain = np.vstack([m.xtrain, m.xval])
        xval = m.xte
        ytrain = np.hstack([m.ytrain, m.yval])
        yval = m.yte
    else:
        xtrain = m.xtrain
        xval = m.xval
        ytrain = m.ytrain
        yval = m.yval

    # note: for cross validation, split the data into n folds and choose the
    # train/validation folds here; not done for simplicity
    d = m.r.size(0)  # input dimension equals the length of the risk vector
    train_data = TensorDataset(*map(lambda x: x.data, prepareData(xtrain, ytrain)))
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    val_data = TensorDataset(*map(lambda x: x.data, prepareData(xval, yval)))
    val_loader = DataLoader(val_data, batch_size=4000, shuffle=True)

    n_output = 2  # binary classification task
    model = LR(d, n_output)
    reg_parameters = model.i2o.weight
    t = Trainer(model, lr=learning_rate, risk_factors=m.r, alpha=alpha,
                regularization=regularization, reg_parameters=reg_parameters,
                name=name)
    losses, vallosses = t.fit(train_loader, n_epochs=n_epochs, print_every=1,
                              valdata=val_loader)

    # report statistics
    val_auc = model_auc(model, val_loader)
    ap = calcAP(m.r.data.numpy(), (reg_parameters[1] - reg_parameters[0]).data.numpy())
    threshold, s1 = sweepS1(model, val_loader)  # decision threshold and score1
    sp = sparsity((reg_parameters[1] - reg_parameters[0]).data.numpy())
    joblib.dump((val_auc, ap, s1, sp), 'models/' + name + '.pkl')
    return val_auc, ap, s1, sp
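
# Example usage of trainData (hypothetical values; assumes the lib package
# and a models/ directory are available):
#   m = Mimic2(mode='total')
#   val_auc, ap, s1, sp = trainData('eye_loss^0.01', m,
#                                   regularization=eye_loss, alpha=0.01)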


class ParamSearch:
    '''grid search over (regularization, alpha) pairs with model selection'''

    def __init__(self, data, n_cpus=10):
        self.tasks = []
        self.hyperparams = []
        self.n_cpus = n_cpus
        self.data = data
        valdata = TensorDataset(*map(lambda x: x.data,
                                     prepareData(data.xval, data.yval)))
        self.valdata = DataLoader(valdata, batch_size=4000, shuffle=True)

    def add_param(self, name, reg, alpha):
        # only queue a training task if its result has not been saved already
        if not os.path.exists('models/' + name + '.pkl'):
            self.tasks.append((name, self.data, reg, alpha))
        self.hyperparams.append((name, reg, alpha))

    def select_on_auc_sp(self, n_bootstrap=100):
        '''
        Return the index of the chosen entry in self.hyperparams.
        Selection is based on AUC and sparsity: among models whose AUC is not
        significantly different from the best, choose the sparsest one.
        This is the criterion used in the learning credible models paper.
        '''
        print('hyperparam select using auc and sparsity')
        aucs = []
        models = []
        sparsities = []
        for name, reg, alpha in self.hyperparams:
            # load the trained model saved under this configuration's name
            model = torch.load('models/' + name + '.pt')
            reg_parameters = model.i2o.weight
            sp = sparsity((reg_parameters[1] - reg_parameters[0]).data.numpy())
            models.append(model)
            sparsities.append(sp)

        # bootstrap the validation set for a confidence interval on AUC
        for _ in range(n_bootstrap):
            test = bootstrap(self.valdata)
            local_aucs = []
            for model in models:
                local_aucs.append(model_auc(model, test))
            aucs.append(local_aucs)
        aucs = np.array(aucs)  # shape: (n_bootstrap, n_models)

        # discard models whose AUC is significantly below the best: model a
        # is kept only if it matches or beats the best model b on more than
        # 5% of the bootstrap replicates (a one-sided bootstrap test)
        b = np.argmax(aucs.mean(0))
        discardset = set([])
        for a in range(len(models)):
            diffs = ((aucs[:, a] - aucs[:, b]) >= 0).astype(int)
            if diffs.sum() / diffs.shape[0] <= 0.05:
                discardset.add(a)

        # among the remaining models, choose the sparsest
        chosen, sp = max(filter(lambda x: x[0] not in discardset,
                                enumerate(sparsities)),
                         key=lambda x: x[1])
        return chosen
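
    # Toy illustration of this criterion (hypothetical numbers): with
    # bootstrap-mean AUCs [0.80, 0.79, 0.70] and sparsities [3, 8, 9], the
    # third model is discarded for a significantly lower AUC, and the second
    # model wins as the sparser of the two statistically tied ones.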

    def select_on_auc(self, *args, **kwargs):
        '''
        Return the index of the chosen entry in self.hyperparams.
        Selection is based on validation AUC alone (no bootstrap needed).
        '''
        print('hyperparam select using auc')
        aucs = []
        for name, reg, alpha in self.hyperparams:
            # load the trained model saved under this configuration's name
            model = torch.load('models/' + name + '.pt')
            aucs.append(model_auc(model, self.valdata))
        # choose the model with the largest AUC
        chosen, auc = max(enumerate(aucs), key=lambda x: x[1])
        return chosen

    def select_on_auc_ap(self, n_bootstrap=100):
        '''
        Return the index of the chosen entry in self.hyperparams.
        Selection is based on AUC and average precision: among models whose
        AUC is not significantly different from the best, choose the one with
        the highest average precision (best alignment with the expert).
        '''
        print('hyperparam select using auc and ap')
        aucs = []
        models = []
        aps = []
        for name, reg, alpha in self.hyperparams:
            # load the trained model saved under this configuration's name
            model = torch.load('models/' + name + '.pt')
            reg_parameters = model.i2o.weight
            ap = calcAP(self.data.r.data.numpy(),
                        (reg_parameters[1] - reg_parameters[0]).data.numpy())
            models.append(model)
            aps.append(ap)

        # bootstrap the validation set for a confidence interval on AUC
        for _ in range(n_bootstrap):
            test = bootstrap(self.valdata)
            local_aucs = []
            for model in models:
                local_aucs.append(model_auc(model, test))
            aucs.append(local_aucs)
        aucs = np.array(aucs)

        # discard models whose AUC is significantly below the best (the same
        # one-sided bootstrap test as in select_on_auc_sp)
        b = np.argmax(aucs.mean(0))
        discardset = set([])
        for a in range(len(models)):
            diffs = ((aucs[:, a] - aucs[:, b]) >= 0).astype(int)
            if diffs.sum() / diffs.shape[0] <= 0.05:
                discardset.add(a)

        # among the remaining models, choose the one with the highest AP
        chosen, ap = max(filter(lambda x: x[0] not in discardset,
                                enumerate(aps)),
                         key=lambda x: x[1])
        return chosen

    def run(self, n_bootstrap=100):
        n_jobs = 10 if self.n_cpus is None else self.n_cpus
        Parallel(n_jobs=n_jobs)(delayed(trainData)(*task) for task in self.tasks)
        # select a model using one of the criteria above
        chosen_idx = self.select_on_auc_sp(n_bootstrap=n_bootstrap)
        # chosen_idx = self.select_on_auc()  # no bootstrap needed
        # chosen_idx = self.select_on_auc_ap(n_bootstrap=n_bootstrap)
        # retrain the chosen model on train+val and report test performance
        name, reg, alpha = self.hyperparams[chosen_idx]
        print('name', name)
        trainData(name, self.data, reg, alpha, test=True)
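
# Example usage of ParamSearch (hypothetical configuration; assumes the
# Mimic2 data loader is available):
#   m = Mimic2(mode='total')
#   ps = ParamSearch(m, n_cpus=4)
#   for alpha in [0.1, 0.01]:
#       ps.add_param('eye_loss^' + str(alpha), eye_loss, alpha)
#   ps.run(n_bootstrap=30)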


def random_risk_exp(regs, n_cpus=None, n_bootstrap=30):
    m = Mimic2(mode='total', random_risk=True)
    ps = ParamSearch(m, n_cpus)
    alphas = [0.1, 0.01, 0.001, 0.0001, 0.00001]
    for reg in regs:
        for alpha in alphas:
            name = 'random_risk_' + reg.__name__ + '^' + str(alpha)
            ps.add_param(name, reg, alpha)
    ps.run(n_bootstrap)


def reg_exp(regs, n_cpus=None, n_bootstrap=30):
    m = Mimic2(mode='total')
    ps = ParamSearch(m, n_cpus)
    alphas = [0.1, 0.01, 0.001, 0.0001, 0.00001]
    for reg in regs:
        for alpha in alphas:
            name = reg.__name__ + '^' + str(alpha)
            ps.add_param(name, reg, alpha)
    ps.run(n_bootstrap)


def eye_height_exp(regs, n_cpus=None, n_bootstrap=30):
    m = Mimic2(mode='total')
    ps = ParamSearch(m, n_cpus)
    alphas = [0.1, 0.01, 0.001, 0.0001, 0.00001]
    for reg in regs:
        for alpha in alphas:
            name = reg.__name__ + '^' + str(alpha)
            ps.add_param(name, reg, alpha)
    ps.run(n_bootstrap)


def expert_feature_only_exp(n_cpus=None, n_bootstrap=30):
    m = Mimic2(mode='total', expert_feature_only=True)
    ps = ParamSearch(m, n_cpus)
    reg = ridge
    alphas = [0.1, 0.01, 0.001, 0.0001, 0.00001]
    for alpha in alphas:
        name = 'expert_only_ridge' + '^' + str(alpha)
        ps.add_param(name, reg, alpha)
    ps.run(n_bootstrap)


def duplicate_exp(regs, n_cpus=None, n_bootstrap=30):
    m = Mimic2(mode='total', duplicate=1)
    ps = ParamSearch(m, n_cpus)
    alphas = [0.1, 0.01, 0.001, 0.0001, 0.00001]
    for reg in regs:
        for alpha in alphas:
            name = reg.__name__ + '_dup_' + '^' + str(alpha)
            ps.add_param(name, reg, alpha)
    ps.run(n_bootstrap)


def two_stage_exp(threshold=0.90, n_cpus=None, n_bootstrap=30):
    '''
    Remove features by setting a threshold on correlation, then apply
    l2 regularization to the remaining features.
    '''
    m = Mimic2(mode='total', two_stage=True, threshold=float(threshold))
    ps = ParamSearch(m, n_cpus)
    reg = ridge
    alphas = [0.1, 0.01, 0.001, 0.0001, 0.00001]
    for alpha in alphas:
        name = 'two_stage_ridge_' + str(threshold) + '^' + str(alpha)
        ps.add_param(name, reg, alpha)
    ps.run(n_bootstrap)


#####################################################
# fixed-weight variants of the weighted regularizers, defined at module level
# so they can be selected by name from the command line

def wridge1_5(*args, **kwargs):
    return wridge(*args, **kwargs, w=1.5)


def wridge3(*args, **kwargs):
    return wridge(*args, **kwargs, w=3)


def wlasso1_5(*args, **kwargs):
    return wlasso(*args, **kwargs, w=1.5)


def wlasso3(*args, **kwargs):
    return wlasso(*args, **kwargs, w=3)
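
# A partial-application sketch would be equivalent (assuming the regularizers
# pickle cleanly under joblib), e.g. functools.partial(wridge, w=1.5), but
# partial objects have no __name__ attribute, which the experiment functions
# above use to build model names, hence the named wrappers.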


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('please specify the function and arguments to run')
    else:
        print(sys.argv[1:])
        # note: eval-based dispatch executes whatever is given on the
        # command line; only run with arguments you trust
        f = eval(sys.argv[1])
        if len(sys.argv) >= 3:
            args = eval(sys.argv[2])
            f(args)
        else:
            f()
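
# Example invocations (hypothetical; the function name and its argument are
# evaluated in this module's namespace):
#   python main.py reg_exp '[eye_loss, lasso, wlasso3]'
#   python main.py expert_feature_only_exp
#   python main.py two_stage_exp 0.9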