loss_multi.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils.distributed as du


class lossAV(nn.Module):
    """Audio-visual loss head: a linear classifier over 256-d fused features
    followed by a masked, per-frame cross-entropy loss."""

    def __init__(self):
        super(lossAV, self).__init__()
        self.criterion = nn.CrossEntropyLoss(reduction='none')
        self.FC = nn.Linear(256, 2)

    def forward(self, x, labels=None, masks=None):
        x = x.squeeze(1)
        x = self.FC(x)
        if labels is None:
            # Inference: return the "speaking" score (class-1 logit) per frame.
            predScore = x[:, 1]
            predScore = predScore.t()
            predScore = predScore.view(-1).detach().cpu().numpy()
            return predScore
        else:
            # Training/validation: cross-entropy restricted to valid frames.
            nloss = self.criterion(x, labels) * masks
            num_valid = masks.sum().float()
            if self.training:
                # Average the valid-frame count across processes so the loss
                # is normalized consistently under distributed training.
                [num_valid] = du.all_reduce([num_valid], average=True)
            nloss = torch.sum(nloss) / num_valid
            predScore = F.softmax(x, dim=-1)
            predLabel = torch.round(F.softmax(x, dim=-1))[:, 1]
            correctNum = ((predLabel == labels) * masks).sum().float()
            return nloss, predScore, predLabel, correctNum


class lossA(nn.Module):
    """Audio-only auxiliary loss head: linear classifier over 128-d audio
    features with masked cross-entropy."""

    def __init__(self):
        super(lossA, self).__init__()
        self.criterion = nn.CrossEntropyLoss(reduction='none')
        self.FC = nn.Linear(128, 2)

    def forward(self, x, labels, masks=None):
        x = x.squeeze(1)
        x = self.FC(x)
        nloss = self.criterion(x, labels) * masks
        num_valid = masks.sum().float()
        if self.training:
            # Keep the normalizer consistent across processes.
            [num_valid] = du.all_reduce([num_valid], average=True)
        nloss = torch.sum(nloss) / num_valid
        return nloss


class lossV(nn.Module):
    """Visual-only auxiliary loss head: linear classifier over 128-d visual
    features with masked cross-entropy."""

    def __init__(self):
        super(lossV, self).__init__()
        self.criterion = nn.CrossEntropyLoss(reduction='none')
        self.FC = nn.Linear(128, 2)

    def forward(self, x, labels, masks=None):
        x = x.squeeze(1)
        x = self.FC(x)
        nloss = self.criterion(x, labels) * masks
        num_valid = masks.sum().float()
        if self.training:
            # Keep the normalizer consistent across processes.
            [num_valid] = du.all_reduce([num_valid], average=True)
        nloss = torch.sum(nloss) / num_valid
        return nloss
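

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): illustrates how these
# loss heads might be called, assuming hypothetical shapes — a batch of 8
# frame embeddings, 256-d for the audio-visual head and 128-d for the
# single-modality heads — and eval mode, so the distributed all_reduce branch
# (which relies on utils.distributed) is skipped.
if __name__ == "__main__":
    torch.manual_seed(0)

    av_criterion = lossAV().eval()
    a_criterion = lossA().eval()

    feats_av = torch.randn(8, 1, 256)     # fused audio-visual features
    feats_a = torch.randn(8, 1, 128)      # audio-only features
    labels = torch.randint(0, 2, (8,))    # speaking / not-speaking labels
    masks = torch.ones(8)                 # all 8 frames are valid

    # Training-style call: returns the masked loss, per-class scores,
    # predicted labels, and the number of correctly classified valid frames.
    nloss, predScore, predLabel, correctNum = av_criterion(feats_av, labels, masks)
    print(nloss.item(), correctNum.item())

    # Inference-style call: labels omitted, returns a numpy array with one
    # "speaking" score per frame.
    scores = av_criterion(feats_av)
    print(scores.shape)

    # Auxiliary audio-only loss.
    print(a_criterion(feats_a, labels, masks).item())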