# aae.py: adversarial autoencoder building blocks (TensorFlow 1.x API: tf.contrib, variable scopes)
import tensorflow as tf
# MLP as encoder
def MLP_encoder(x, n_hidden, n_output, keep_prob):
    """Two-layer MLP mapping an image batch x to an unconstrained latent code."""
    with tf.variable_scope("MLP_encoder"):
        # initializers
        w_init = tf.contrib.layers.xavier_initializer()
        b_init = tf.constant_initializer(0.)

        # 1st hidden layer
        w0 = tf.get_variable('w0', [x.get_shape()[1], n_hidden], initializer=w_init)
        b0 = tf.get_variable('b0', [n_hidden], initializer=b_init)
        h0 = tf.matmul(x, w0) + b0
        h0 = tf.nn.relu(h0)
        h0 = tf.nn.dropout(h0, keep_prob)

        # 2nd hidden layer
        w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)
        b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)
        h1 = tf.matmul(h0, w1) + b1
        h1 = tf.nn.relu(h1)
        h1 = tf.nn.dropout(h1, keep_prob)

        # output layer (linear: the latent code is not squashed)
        wo = tf.get_variable('wo', [h1.get_shape()[1], n_output], initializer=w_init)
        bo = tf.get_variable('bo', [n_output], initializer=b_init)
        output = tf.matmul(h1, wo) + bo

    return output
# MLP as decoder
def MLP_decoder(z, n_hidden, n_output, keep_prob, reuse=False):
    """Two-layer MLP mapping a latent code z back to pixel space in [0, 1]."""
    with tf.variable_scope("MLP_decoder", reuse=reuse):
        # initializers
        w_init = tf.contrib.layers.xavier_initializer()
        b_init = tf.constant_initializer(0.)

        # 1st hidden layer
        w0 = tf.get_variable('w0', [z.get_shape()[1], n_hidden], initializer=w_init)
        b0 = tf.get_variable('b0', [n_hidden], initializer=b_init)
        h0 = tf.matmul(z, w0) + b0
        h0 = tf.nn.relu(h0)
        h0 = tf.nn.dropout(h0, keep_prob)

        # 2nd hidden layer
        w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)
        b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)
        h1 = tf.matmul(h0, w1) + b1
        h1 = tf.nn.relu(h1)
        h1 = tf.nn.dropout(h1, keep_prob)

        # output layer (sigmoid keeps reconstructed pixel values in [0, 1])
        wo = tf.get_variable('wo', [h1.get_shape()[1], n_output], initializer=w_init)
        bo = tf.get_variable('bo', [n_output], initializer=b_init)
        y = tf.sigmoid(tf.matmul(h1, wo) + bo)

    return y
# Discriminator
def discriminator(z, n_hidden, n_output, keep_prob, reuse=False):
    """Two-layer MLP scoring latent codes; returns (sigmoid probability, raw logits)."""
    with tf.variable_scope("discriminator", reuse=reuse):
        # initializers
        w_init = tf.contrib.layers.xavier_initializer()
        b_init = tf.constant_initializer(0.)

        # 1st hidden layer
        w0 = tf.get_variable('w0', [z.get_shape()[1], n_hidden], initializer=w_init)
        b0 = tf.get_variable('b0', [n_hidden], initializer=b_init)
        h0 = tf.matmul(z, w0) + b0
        h0 = tf.nn.relu(h0)
        h0 = tf.nn.dropout(h0, keep_prob)

        # 2nd hidden layer
        w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)
        b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)
        h1 = tf.matmul(h0, w1) + b1
        h1 = tf.nn.relu(h1)
        h1 = tf.nn.dropout(h1, keep_prob)

        # output layer (keep raw logits for sigmoid_cross_entropy_with_logits)
        wo = tf.get_variable('wo', [h1.get_shape()[1], n_output], initializer=w_init)
        bo = tf.get_variable('bo', [n_output], initializer=b_init)
        y = tf.matmul(h1, wo) + bo

    return tf.sigmoid(y), y
# Gateway
def adversarial_autoencoder(x_hat, x, x_id, z_sample, z_id, dim_img, dim_z, n_hidden, keep_prob):
    ## Reconstruction Loss
    # encoding
    z = MLP_encoder(x_hat, n_hidden, dim_z, keep_prob)
    # decoding
    y = MLP_decoder(z, n_hidden, dim_img, keep_prob)
    # negative mean squared error between the target x and the reconstruction y
    marginal_likelihood = -tf.reduce_mean(tf.squared_difference(x, y))

    ## GAN Loss
    # "real" codes are drawn from the prior, "fake" codes come from the encoder;
    # both are concatenated with a one-hot label so the prior can be conditioned on class
    z_real = tf.concat([z_sample, z_id], 1)
    z_fake = tf.concat([z, x_id], 1)
    D_real, D_real_logits = discriminator(z_real, int(n_hidden), 1, keep_prob)
    D_fake, D_fake_logits = discriminator(z_fake, int(n_hidden), 1, keep_prob, reuse=True)

    # discriminator loss: label prior samples 1, encoded samples 0
    D_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits, labels=tf.ones_like(D_real_logits)))
    D_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros_like(D_fake_logits)))
    D_loss = D_loss_real + D_loss_fake

    # generator loss: the encoder tries to make its codes look like prior samples
    G_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones_like(D_fake_logits)))

    return y, z, -marginal_likelihood, D_loss, G_loss
def decoder(z, dim_img, n_hidden):
    """Decode a latent code with the trained weights, dropout disabled (keep_prob=1.0)."""
    y = MLP_decoder(z, n_hidden, dim_img, 1.0, reuse=True)
    return y
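
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). It shows one way
# to wire these functions into a trainable graph. The placeholder shapes, the
# class count, the learning rate, and the optimizer choice are assumptions,
# not values taken from this repository.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dim_img, dim_z, n_hidden, n_cls = 784, 2, 500, 10  # assumed MNIST-like sizes

    x_hat = tf.placeholder(tf.float32, [None, dim_img])    # (possibly corrupted) input image
    x = tf.placeholder(tf.float32, [None, dim_img])        # reconstruction target
    x_id = tf.placeholder(tf.float32, [None, n_cls])       # one-hot label of the input
    z_sample = tf.placeholder(tf.float32, [None, dim_z])   # sample drawn from the prior p(z)
    z_id = tf.placeholder(tf.float32, [None, n_cls])       # one-hot label paired with the prior sample
    keep_prob = tf.placeholder(tf.float32)                 # dropout keep probability

    y, z, recon_loss, D_loss, G_loss = adversarial_autoencoder(
        x_hat, x, x_id, z_sample, z_id, dim_img, dim_z, n_hidden, keep_prob)

    # Standard AAE training minimizes the three losses with separate optimizers
    # over distinct variable sets, selected via the scope names defined above.
    t_vars = tf.trainable_variables()
    ae_vars = [v for v in t_vars if "MLP_encoder" in v.name or "MLP_decoder" in v.name]
    d_vars = [v for v in t_vars if "discriminator" in v.name]
    g_vars = [v for v in t_vars if "MLP_encoder" in v.name]

    train_ae = tf.train.AdamOptimizer(1e-3).minimize(recon_loss, var_list=ae_vars)
    train_d = tf.train.AdamOptimizer(1e-3).minimize(D_loss, var_list=d_vars)
    train_g = tf.train.AdamOptimizer(1e-3).minimize(G_loss, var_list=g_vars)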