@@ -55,7 +55,7 @@ def __init__(
55
55
ps = 0.3 ,
56
56
lr = 0.01 ,
57
57
pretrain = True ,
58
- pretrain_file = "./pretrain/best.model" ,
58
+ pretrain_file = None ,
59
59
):
60
60
"""
61
61
TabNet model for Qlib
@@ -81,7 +81,7 @@ def __init__(
81
81
self .metric = metric
82
82
self .early_stop = early_stop
83
83
self .pretrain = pretrain
84
- self .pretrain_file = pretrain_file
84
+ self .pretrain_file = get_or_create_path ( pretrain_file )
85
85
self .logger .info (
86
86
"TabNet:"
87
87
"\n batch_size : {}"
@@ -116,6 +116,10 @@ def __init__(
116
116
else :
117
117
raise NotImplementedError ("optimizer {} is not supported!" .format (optimizer ))
118
118
119
@property
def use_gpu(self):
    """Return True when the model's device is a GPU (i.e. not CPU).

    Bug fix: the original body evaluated ``self.device == torch.device("cpu")``
    and discarded the result, so the property always returned ``None`` (falsy)
    and the ``torch.cuda.empty_cache()`` branch guarded by it could never run.
    The guard's intent (free CUDA memory only when actually on GPU) requires
    returning the *negated* comparison.
    """
    # True iff the configured device is anything other than the CPU.
    return self.device != torch.device("cpu")
119
123
def pretrain_fn (self , dataset = DatasetH , pretrain_file = "./pretrain/best.model" ):
120
124
get_or_create_path (pretrain_file )
121
125
@@ -182,7 +186,7 @@ def fit(
182
186
183
187
stop_steps = 0
184
188
train_loss = 0
185
- best_score = np .inf
189
+ best_score = - np .inf
186
190
best_epoch = 0
187
191
evals_result ["train" ] = []
188
192
evals_result ["valid" ] = []
@@ -201,7 +205,7 @@ def fit(
201
205
evals_result ["train" ].append (train_score )
202
206
evals_result ["valid" ].append (val_score )
203
207
204
- if val_score < best_score :
208
+ if val_score > best_score :
205
209
best_score = val_score
206
210
stop_steps = 0
207
211
best_epoch = epoch_idx
@@ -215,6 +219,9 @@ def fit(
215
219
self .logger .info ("best score: %.6lf @ %d" % (best_score , best_epoch ))
216
220
self .tabnet_model .load_state_dict (best_param )
217
221
torch .save (best_param , save_path )
222
+
223
+ if self .use_gpu :
224
+ torch .cuda .empty_cache ()
218
225
219
226
def predict (self , dataset ):
220
227
if not self .fitted :
@@ -264,12 +271,13 @@ def test_epoch(self, data_x, data_y):
264
271
feature = x_values [indices [i : i + self .batch_size ]].float ().to (self .device )
265
272
label = y_values [indices [i : i + self .batch_size ]].float ().to (self .device )
266
273
priors = torch .ones (self .batch_size , self .d_feat ).to (self .device )
267
- pred = self .tabnet_model (feature , priors )
268
- loss = self .loss_fn (pred , label )
269
- losses .append (loss .item ())
274
+ with torch .no_grad ():
275
+ pred = self .tabnet_model (feature , priors )
276
+ loss = self .loss_fn (pred , label )
277
+ losses .append (loss .item ())
270
278
271
- score = self .metric_fn (pred , label )
272
- scores .append (score .item ())
279
+ score = self .metric_fn (pred , label )
280
+ scores .append (score .item ())
273
281
274
282
return np .mean (losses ), np .mean (scores )
275
283
@@ -352,10 +360,11 @@ def pretrain_test_epoch(self, x_train):
352
360
label = y_train_values .float ().to (self .device )
353
361
S_mask = S_mask .to (self .device )
354
362
priors = 1 - S_mask
355
- (vec , sparse_loss ) = self .tabnet_model (feature , priors )
356
- f = self .tabnet_decoder (vec )
363
+ with torch .no_grad ():
364
+ (vec , sparse_loss ) = self .tabnet_model (feature , priors )
365
+ f = self .tabnet_decoder (vec )
357
366
358
- loss = self .pretrain_loss_fn (label , f , S_mask )
367
+ loss = self .pretrain_loss_fn (label , f , S_mask )
359
368
losses .append (loss .item ())
360
369
361
370
return np .mean (losses )
0 commit comments