-
Notifications
You must be signed in to change notification settings - Fork 0
/
layer.py
73 lines (61 loc) · 2.56 KB
/
layer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import tensorflow as tf
import distutil as util
# Running counters used to build unique per-layer-type op names
# ('conv0', 'conv1', ...); each layer builder increments its own counter.
conv_counter = 0
affine_counter = 0
pool_counter = 0
norm_counter = 0
# Trainable variables collected by the layer builders
# (appended to by _affine; see the builders below).
parameters = []
def _conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType):
    """Build a conv2d -> bias_add -> ReLU layer.

    Args:
      inpOp: input tensor (NHWC layout implied by the [1, dH, dW, 1] strides).
      nIn: number of input channels.
      nOut: number of output channels (filters).
      kH, kW: kernel height and width.
      dH, dW: vertical and horizontal stride.
      padType: padding mode passed to tf.nn.conv2d ('SAME' or 'VALID').

    Returns:
      The ReLU-activated output tensor.
    """
    global conv_counter
    global parameters
    name = 'conv' + str(conv_counter)
    conv_counter += 1
    with tf.variable_scope(name) as scope:
        kernel = util._variable_with_weight_decay(
            'weights', shape=[kH, kW, nIn, nOut], stddev=1e-4, wd=0.0)
        strides = [1, dH, dW, 1]
        conv = tf.nn.conv2d(inpOp, kernel, strides, padding=padType)
        biases = util._variable_on_cpu('biases', [nOut],
                                       tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope.name)
        # Fix: track this layer's variables in the module-level list, matching
        # what _affine does; previously `global parameters` was declared but
        # the conv weights/biases were never recorded.
        parameters += [kernel, biases]
        util.print_activations(conv1)
        return conv1
def _affine(inpOp, nIn, nOut):
    """Build a fully connected layer with a fused ReLU activation.

    Args:
      inpOp: input tensor of shape [batch, nIn].
      nIn: input feature dimension.
      nOut: output feature dimension.

    Returns:
      The ReLU(inpOp @ weights + biases) tensor.
    """
    global affine_counter
    global parameters
    layer_name = 'affine' + str(affine_counter)
    affine_counter += 1
    with tf.name_scope(layer_name):
        initial_w = tf.truncated_normal([nIn, nOut],
                                        dtype=tf.float32,
                                        stddev=1e-1)
        kernel = tf.Variable(initial_w, name='weights')
        initial_b = tf.constant(0.0, shape=[nOut], dtype=tf.float32)
        biases = tf.Variable(initial_b, trainable=True, name='biases')
        # relu_layer fuses matmul + bias_add + relu into one op.
        affine1 = tf.nn.relu_layer(inpOp, kernel, biases, name=layer_name)
        parameters += [kernel, biases]
        return affine1
def _mpool(inpOp, kH, kW, dH, dW):
    """Build a max-pooling layer with SAME padding.

    Args:
      inpOp: input tensor (NHWC).
      kH, kW: pooling window height and width.
      dH, dW: vertical and horizontal stride.

    Returns:
      The pooled tensor.
    """
    global pool_counter
    global parameters
    layer_name = 'pool' + str(pool_counter)
    pool_counter += 1
    return tf.nn.max_pool(inpOp,
                          ksize=[1, kH, kW, 1],
                          strides=[1, dH, dW, 1],
                          padding='SAME',
                          name=layer_name)
def _norm(inpOp, depth, bias, alpha, beta):
    """Build a local response normalization (LRN) layer.

    Args:
      inpOp: input tensor.
      depth: depth_radius passed to tf.nn.lrn.
      bias, alpha, beta: LRN hyperparameters.

    Returns:
      The normalized tensor.
    """
    global norm_counter
    name = 'norm' + str(norm_counter)
    # Fix: the counter was never advanced, so every LRN layer in the graph
    # received the same name 'norm0'; all other layer builders increment
    # their counters.
    norm_counter += 1
    return tf.nn.lrn(inpOp, depth_radius=depth, bias=bias, alpha=alpha,
                     beta=beta, name=name)
def loss(logits, labels):
    """Mean softmax cross-entropy loss over 1000 classes.

    Args:
      logits: [batch, 1000] unscaled class scores.
      labels: integer class-id tensor of shape [batch].

    Returns:
      Scalar mean cross-entropy tensor named 'xentropy_mean'.
    """
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    # Fix: tf.concat takes (values, axis) in the TF 1.x API this file already
    # uses elsewhere (tf.stack, keyword-form softmax_cross_entropy_with_logits);
    # the old (axis, values) order raises at graph-construction time.
    concated = tf.concat([indices, labels], 1)
    # Scatter 1.0 at each (row, class) position to build dense one-hot labels.
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=onehot_labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss