model.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class TextCNN(nn.Module):
    def __init__(self, args):
        super(TextCNN, self).__init__()
        self.args = args

        class_num = args.class_num
        channel_num = 1
        filter_num = args.filter_num
        filter_sizes = args.filter_sizes
        vocabulary_size = args.vocabulary_size
        embedding_dimension = args.embedding_dim

        self.embedding = nn.Embedding(vocabulary_size, embedding_dimension)
        if args.static:
            # from_pretrained is a classmethod; freeze the vectors unless fine-tuning is requested
            self.embedding = nn.Embedding.from_pretrained(args.vectors, freeze=not args.non_static)
        if args.multichannel:
            # Second embedding channel, initialized from the same pretrained vectors
            self.embedding2 = nn.Embedding.from_pretrained(args.vectors)
            channel_num += 1
        else:
            self.embedding2 = None

        # One Conv2d per filter size: in_channels, out_channels, kernel (height, width)
        self.convs = nn.ModuleList(
            [nn.Conv2d(channel_num, filter_num, (size, embedding_dimension)) for size in filter_sizes]
        )
        self.dropout = nn.Dropout(args.dropout)
        self.fc = nn.Linear(len(filter_sizes) * filter_num, class_num)

    def forward(self, x):
        if self.embedding2 is not None:
            # Two embedding channels: (batch, 2, seq_len, embedding_dim)
            x = torch.stack([self.embedding(x), self.embedding2(x)], dim=1)
        else:
            # Single channel: (batch, 1, seq_len, embedding_dim)
            x = self.embedding(x)
            x = x.unsqueeze(1)
        # Convolve and drop the width dim: each -> (batch, filter_num, seq_len - size + 1)
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convs]
        # Max-pool over time: each -> (batch, filter_num)
        x = [F.max_pool1d(item, item.size(2)).squeeze(2) for item in x]
        # Concatenate the pooled features from all filter sizes
        x = torch.cat(x, 1)
        x = self.dropout(x)
        logits = self.fc(x)
        return logits
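

# A minimal smoke test, not part of the original file: it sketches how the
# model might be instantiated and run. The hyperparameter values below are
# illustrative assumptions, as are the use of argparse.Namespace to stand in
# for the parsed `args` and the random token batch.
if __name__ == "__main__":
    from argparse import Namespace

    args = Namespace(
        class_num=2,           # assumed: binary classification
        filter_num=100,        # assumed: 100 feature maps per filter size
        filter_sizes=[3, 4, 5],
        vocabulary_size=10000,
        embedding_dim=128,
        dropout=0.5,
        static=False,          # train embeddings from scratch
        non_static=True,
        multichannel=False,
        vectors=None,          # only needed when static/multichannel is True
    )
    model = TextCNN(args)
    batch = torch.randint(0, args.vocabulary_size, (4, 50))  # (batch, seq_len)
    logits = model(batch)
    print(logits.shape)  # expected: torch.Size([4, 2])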