model.py
import torch
import torch.nn as nn
import torch.nn.functional as F


def conv(in_channels, out_channels, kernel_size=4, stride=2, padding=1,
         instance_norm=True, relu=True, relu_slope=None, init_zero_weights=False):
    """Build a Conv2d -> (InstanceNorm2d) -> (ReLU / LeakyReLU) block as a list of layers."""
    layers = []
    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                           kernel_size=kernel_size, stride=stride, padding=padding, bias=True)
    if init_zero_weights:
        # Near-zero random initialization; Conv2d weights have shape (out, in, kH, kW).
        conv_layer.weight.data = torch.randn(out_channels, in_channels, kernel_size, kernel_size) * 0.001
    else:
        # DCGAN-style initialization: N(0, 0.02).
        nn.init.normal_(conv_layer.weight.data, 0.0, 0.02)
    layers.append(conv_layer)
    if instance_norm:
        layers.append(nn.InstanceNorm2d(out_channels))
    if relu:
        if relu_slope:
            relu_layer = nn.LeakyReLU(relu_slope, inplace=True)
        else:
            relu_layer = nn.ReLU(inplace=True)
        layers.append(relu_layer)
    return layers


def deconv(in_channels, out_channels, kernel_size=4, stride=2, padding=1, output_padding=1,
           instance_norm=True, relu=True, relu_slope=None, init_zero_weights=False):
    """Build a ConvTranspose2d -> (InstanceNorm2d) -> (ReLU / LeakyReLU) block as a list of layers."""
    layers = []
    deconv_layer = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels,
                                      kernel_size=kernel_size, stride=stride, padding=padding,
                                      output_padding=output_padding, bias=True)
    if init_zero_weights:
        # Near-zero random initialization; ConvTranspose2d weights have shape (in, out, kH, kW).
        deconv_layer.weight.data = torch.randn(in_channels, out_channels, kernel_size, kernel_size) * 0.001
    else:
        # DCGAN-style initialization: N(0, 0.02).
        nn.init.normal_(deconv_layer.weight.data, 0.0, 0.02)
    layers.append(deconv_layer)
    if instance_norm:
        layers.append(nn.InstanceNorm2d(out_channels))
    if relu:
        if relu_slope:
            relu_layer = nn.LeakyReLU(relu_slope, inplace=True)
        else:
            relu_layer = nn.ReLU(inplace=True)
        layers.append(relu_layer)
    return layers
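

# Usage sketch (illustrative, not part of the original module): conv() and deconv()
# return plain Python lists of layers, so they can be spliced into a larger
# nn.Sequential. With the defaults (kernel_size=4, stride=2, padding=1), conv()
# halves the spatial resolution; with kernel_size=3 (as used in CycleGenerator
# below), deconv() doubles it:
#
#   down = nn.Sequential(*conv(3, 64))                 # (N, 3, 256, 256) -> (N, 64, 128, 128)
#   up = nn.Sequential(*deconv(64, 3, kernel_size=3))  # (N, 64, 128, 128) -> (N, 3, 256, 256)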


class ResidualBlock(nn.Module):
    """Two reflection-padded 3x3 conv blocks with an additive skip connection."""

    def __init__(self, input_features):
        super(ResidualBlock, self).__init__()
        conv_layers = [
            nn.ReflectionPad2d(1),
            *conv(input_features, input_features, kernel_size=3, stride=1, padding=0),
            nn.ReflectionPad2d(1),
            *conv(input_features, input_features, kernel_size=3, stride=1, padding=0, relu=False),
        ]
        self.model = nn.Sequential(*conv_layers)

    def forward(self, input_data):
        # Identity shortcut: output = x + F(x).
        return input_data + self.model(input_data)
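

# Sketch (illustrative): a ResidualBlock preserves both channel count and spatial size,
# e.g. ResidualBlock(256) maps (N, 256, 64, 64) -> (N, 256, 64, 64).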


class CycleGenerator(nn.Module):
    """CycleGAN generator: a 7x7 input conv, two downsampling convs, a stack of
    residual blocks, two upsampling transposed convs, and a 7x7 output conv with tanh."""

    def __init__(self, in_channels=3, out_channels=3, res_blocks=9):
        super(CycleGenerator, self).__init__()
        # Initial 7x7 convolutional layer (reflection-padded, stride 1).
        layers = [
            nn.ReflectionPad2d(3),
            *conv(in_channels, 64, 7, stride=1, padding=0),
        ]
        # Two 3x3 downsampling convolutions (stride 2): 64 -> 128 -> 256 channels.
        input_features = 64
        output_features = input_features * 2
        for _ in range(2):
            layers += conv(input_features, output_features, 3)
            input_features, output_features = output_features, output_features * 2
        # Residual blocks at the bottleneck resolution.
        for _ in range(res_blocks):
            layers += [ResidualBlock(input_features)]
        # Two 3x3 upsampling transposed convolutions (stride 2): 256 -> 128 -> 64 channels.
        output_features = input_features // 2
        for _ in range(2):
            layers += deconv(input_features, output_features, 3)
            input_features, output_features = output_features, output_features // 2
        # Output layer: 7x7 conv back to image channels, squashed to [-1, 1] by tanh.
        layers += [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_features, out_channels, 7),
            nn.Tanh(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, real_image):
        return self.model(real_image)


class Discriminator(nn.Module):
    """PatchGAN discriminator (C64-C128-C256-C512 followed by a 1-channel output conv)."""

    def __init__(self, in_channels=3, conv_dim=64):
        super(Discriminator, self).__init__()
        # No instance norm on the first block; LeakyReLU(0.2) throughout.
        C64 = conv(in_channels, conv_dim, instance_norm=False, relu_slope=0.2)
        C128 = conv(conv_dim, conv_dim * 2, relu_slope=0.2)
        C256 = conv(conv_dim * 2, conv_dim * 4, relu_slope=0.2)
        C512 = conv(conv_dim * 4, conv_dim * 8, stride=1, relu_slope=0.2)
        # Final 1-channel conv produces a patch map of real/fake scores (no norm, no activation).
        C1 = conv(conv_dim * 8, 1, stride=1, instance_norm=False, relu=False)
        self.model = nn.Sequential(
            *C64,
            *C128,
            *C256,
            *C512,
            *C1,
        )

    def forward(self, image):
        return self.model(image)
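

if __name__ == "__main__":
    # Minimal shape check (a sketch added for illustration, not part of the original
    # training code). With 256x256 inputs, the generator should preserve the image
    # resolution and the PatchGAN discriminator should emit a 30x30 score map.
    x = torch.randn(1, 3, 256, 256)

    G = CycleGenerator(in_channels=3, out_channels=3, res_blocks=9)
    D = Discriminator(in_channels=3, conv_dim=64)

    with torch.no_grad():
        fake = G(x)      # expected: (1, 3, 256, 256), values in [-1, 1] via tanh
        patch = D(fake)  # expected: (1, 1, 30, 30) patch of real/fake scores

    print(fake.shape, patch.shape)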