03 - TensorBoard2.py
import tensorflow as tf
import numpy as np
data = np.loadtxt('./data.csv', delimiter=',',
                  unpack=True, dtype='float32')
x_data = np.transpose(data[0:2])
y_data = np.transpose(data[2:])
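# Assumed data.csv layout (as in the original tutorial): each row holds two
# input features followed by a three-class one-hot label, e.g.
#   0, 0, 1, 0, 0
# so x_data has shape (N, 2) and y_data has shape (N, 3).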
#########
# Build the neural network model
######
global_step = tf.Variable(0, trainable=False, name='global_step')
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
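# Each name_scope below groups its ops into one collapsible node in the
# TensorBoard graph view; the histogram/scalar summaries record values for
# the Histograms and Scalars tabs.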
with tf.name_scope('layer1'):
    W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.), name='W1')
    L1 = tf.nn.relu(tf.matmul(X, W1))

    tf.summary.histogram("X", X)
    tf.summary.histogram("Weights", W1)

with tf.name_scope('layer2'):
    W2 = tf.Variable(tf.random_uniform([10, 20], -1., 1.), name='W2')
    L2 = tf.nn.relu(tf.matmul(L1, W2))

    tf.summary.histogram("Weights", W2)

with tf.name_scope('output'):
    W3 = tf.Variable(tf.random_uniform([20, 3], -1., 1.), name='W3')
    model = tf.matmul(L2, W3)

    tf.summary.histogram("Weights", W3)
    tf.summary.histogram("Model", model)

with tf.name_scope('optimizer'):
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))

    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(cost, global_step=global_step)

    tf.summary.scalar('cost', cost)
#########
# Train the neural network model
######
sess = tf.Session()
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state('./model')
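# Restore the latest checkpoint in ./model if one exists (training then
# resumes from the saved global_step); otherwise initialize all variables.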
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    sess.run(tf.global_variables_initializer())
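# merge_all() bundles every summary op defined above into a single op, and
# the FileWriter streams the serialized summaries (plus the graph) to ./logs.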
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('./logs', sess.graph)
for step in range(100):
    sess.run(train_op, feed_dict={X: x_data, Y: y_data})

    print('Step: %d, ' % sess.run(global_step),
          'Cost: %.3f' % sess.run(cost, feed_dict={X: x_data, Y: y_data}))

    summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
    writer.add_summary(summary, global_step=sess.run(global_step))
saver.save(sess, './model/dnn.ckpt', global_step=global_step)
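# To inspect the written summaries, launch TensorBoard (standard CLI, run
# separately from this script) and open http://localhost:6006 in a browser:
#   tensorboard --logdir=./logs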
#########
# Check the results
######
prediction = tf.argmax(model, 1)
target = tf.argmax(Y, 1)
print('Prediction:', sess.run(prediction, feed_dict={X: x_data}))
print('Ground truth:', sess.run(target, feed_dict={Y: y_data}))
is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('Accuracy: %.2f' % sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data}))