import torch
import numpy as np
from bisect import bisect_right


def rank_loss(y_pred, y):
    """Differentiable pairwise ranking hinge loss.

    Expects (N, 1) column tensors; the .t() broadcasts produce all N x N
    pairwise differences, and a pair is penalized when the predicted
    ordering disagrees with the ground-truth ordering.
    """
    ranking_loss = torch.nn.functional.relu(
        (y_pred - y_pred.t()) * torch.sign(y.t() - y)
    )
    # Normalize by the number of ordered pairs and by the largest single
    # violation so the loss stays in a stable range.
    scale = 1 + torch.max(ranking_loss)
    return (
        torch.sum(ranking_loss) / y_pred.shape[0] / (y_pred.shape[0] - 1) / scale
    ).float()
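

# A minimal usage sketch for rank_loss (illustrative, not from the original
# file; assumes scores are (N, 1) column tensors, which the .t() broadcasting
# above requires):
#
#   y_pred = torch.randn(16, 1)
#   y = torch.randn(16, 1)
#   print(rank_loss(y_pred, y))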


def plcc_loss(y_pred, y):
    """Loss derived from the Pearson linear correlation coefficient (PLCC).

    Both inputs are z-score normalized, then an MSE term and a
    correlation-scaled MSE term are averaged; minimizing this pushes the
    PLCC between predictions and targets toward 1.
    """
    sigma_hat, m_hat = torch.std_mean(y_pred, unbiased=False)
    y_pred = (y_pred - m_hat) / (sigma_hat + 1e-8)
    sigma, m = torch.std_mean(y, unbiased=False)
    y = (y - m) / (sigma + 1e-8)
    loss0 = torch.nn.functional.mse_loss(y_pred, y) / 4
    rho = torch.mean(y_pred * y)  # PLCC of the normalized scores
    loss1 = torch.nn.functional.mse_loss(rho * y_pred, y) / 4
    return ((loss0 + loss1) / 2).float()
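

# Sketch of how plcc_loss behaves (illustrative values, not from the
# original): a perfect linear relationship gives a near-zero loss, while
# anti-correlated inputs give a larger one.
#
#   y = torch.arange(10, dtype=torch.float32)
#   print(plcc_loss(y * 2.0 + 1.0, y))  # ~0
#   print(plcc_loss(-y, y))             # larger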


def rescale(pr, gt=None):
    """Z-score predictions; optionally map them onto the ground-truth scale."""
    if gt is None:
        pr = (pr - np.mean(pr)) / np.std(pr)
    else:
        pr = ((pr - np.mean(pr)) / np.std(pr)) * np.std(gt) + np.mean(gt)
    return pr
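

# Example use of rescale (a sketch; the arrays are illustrative): align raw
# model scores with the ground-truth scale before computing error metrics.
#
#   pr = np.array([0.1, 0.4, 0.35, 0.8])
#   gt = np.array([2.0, 3.5, 3.0, 4.5])
#   aligned = rescale(pr, gt)  # same mean and std as gt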


class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR schedule with an initial warmup phase."""

    def __init__(
        self,
        optimizer,
        milestones,  # e.g. [40, 70]: epochs at which the LR is decayed
        gamma=0.1,  # multiplicative decay factor applied at each milestone
        warmup_factor=0.01,
        warmup_iters=10,
        warmup_method="linear",
        last_epoch=-1,
    ):
        # Milestones must be given in increasing order.
        if not list(milestones) == sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers."
                " Got {}".format(milestones)
            )
        if warmup_method not in ("constant", "linear"):
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted,"
                " but got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
        # self.last_epoch advances every step (0, 1, 2, ...), while
        # self.warmup_iters stays fixed (e.g. 10 epochs of linear warmup).

    def get_lr(self):
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # Ramp linearly from warmup_factor up to 1 over warmup_iters.
                alpha = self.last_epoch / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        # self.base_lrs holds the optimizer's initial learning rates
        # (e.g. weight_lr=0.0003, bias_lr=0.0006); each milestone already
        # passed multiplies the LR by another factor of gamma.
        return [
            base_lr
            * warmup_factor
            * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]
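

# A minimal smoke test for the scheduler (a sketch, not part of the original
# file; the model, learning rate, and milestones below are illustrative
# assumptions):
if __name__ == "__main__":
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.0003)
    scheduler = WarmupMultiStepLR(
        optimizer,
        milestones=[40, 70],
        gamma=0.1,
        warmup_factor=0.01,
        warmup_iters=10,
    )
    for epoch in range(80):
        optimizer.step()  # a real loop would compute a loss and backprop first
        scheduler.step()
        if epoch in (0, 5, 10, 40, 70):
            print(epoch, scheduler.get_last_lr())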