-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathtraffic_data_analysis.py
291 lines (237 loc) · 11 KB
/
traffic_data_analysis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
import tensorflow as tf
import numpy as np
import matplotlib.pylab as plt
import datetime
import time
from tensorflow.models.rnn.rnn import *
from sequence_rnn import SequenceRNN
from data_loaders import TrafficDataLoader
class TrafficRNN(SequenceRNN):
    """LSTM regression network mapping a window of traffic features to one
    scalar prediction per time step.

    The graph is unrolled `config.batch_size` steps; `config.seq_width`
    input features are fed at each step and a linear readout (W, b) maps
    the hidden state to a single output value.  The training loss is the
    L2 norm of the prediction residual.
    """

    def __init__(self, is_training, config):
        """Build the graph.

        Args:
            is_training: when False, only the forward/error ops are built
                (no learning-rate variable and no train op).
            config: object providing seq_width, batch_size (unroll length),
                num_hidden, num_layers, max_grad_norm and useGDO.
        """
        seq_width = config.seq_width
        n_steps = config.batch_size
        num_hidden = config.num_hidden
        num_layers = config.num_layers
        # Tensors for input, target and sequence-length placeholders.
        self._seq_input = tf.placeholder(tf.float32, [n_steps, seq_width])
        self._seq_target = tf.placeholder(tf.float32, [n_steps, 1])
        self._early_stop = tf.placeholder(tf.int32)
        # The legacy rnn() API wants a list with one [1, seq_width] tensor
        # per time step.  NOTE(review): `self.seq_input` is presumably a
        # property on SequenceRNN returning self._seq_input -- confirm.
        inputs = [tf.reshape(data, (1, seq_width))
                  for data in tf.split(0, n_steps, self.seq_input)]
        initializer = tf.random_uniform_initializer(-.1, .1)
        cell = rnn_cell.LSTMCell(num_hidden, seq_width, initializer=initializer)
        if num_layers > 1:
            cell = rnn_cell.MultiRNNCell([cell] * num_layers)
        # Initial state for a single unrolled sequence (batch of 1).
        self._initial_state = cell.zero_state(1, tf.float32)
        outputs, states = rnn(cell, inputs, initial_state=self._initial_state,
                              sequence_length=self._early_stop)
        # Save the final state so callers can thread it into the next batch.
        self._final_state = states[-1]
        # rnn() returns a list of per-step tensors; stack them into a single
        # [n_steps, num_hidden] tensor for the matmul readout.
        outputs = tf.reshape(tf.concat(1, outputs), [-1, num_hidden])
        # Linear readout on top of the RNN outputs.
        W = tf.get_variable('W', [num_hidden, 1])
        b = tf.get_variable('b', [1])
        _output = tf.matmul(outputs, W) + b
        self._output = _output
        # L2 (Euclidean) norm of the prediction error: sqrt(sum((y-t)^2)).
        error = tf.pow(tf.reduce_sum(tf.pow(tf.sub(_output, self._seq_target), 2)), .5)
        tf.scalar_summary("error", error)
        self._error = error
        self._merge_summaries_op = tf.merge_all_summaries()
        if not is_training:
            return
        # Learning rate, assigned externally via assign_lr().
        # BUG FIX: the original passed trainable='False'; a non-empty string
        # is truthy, so the learning rate itself was a trainable variable
        # and the optimizer would update it along with the weights.
        self._lr = tf.Variable(0., trainable=False, name='lr')
        # Clip gradients of the error w.r.t. all trainable variables.
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._error, tvars),
                                          config.max_grad_norm)
        # Two options: GradientDescentOptimizer (config.useGDO True) or
        # AdamOptimizer (config.useGDO False).
        if config.useGDO:
            optimizer = tf.train.GradientDescentOptimizer(self._lr)
        else:
            optimizer = tf.train.AdamOptimizer(self._lr)
        # Op for one training step.
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))

    @property
    def output(self):
        """Per-step predictions, shape [n_steps, 1]."""
        return self._output

    @property
    def merge_summaries_op(self):
        """Merged summary op (currently just the scalar 'error' summary)."""
        return self._merge_summaries_op
class TrafficDataConfig(object):
    """Window configuration for the training split.

    For data sampled in 5 min intervals (12 samples per hour):
    start: 0 hours (offset time)
    window_size: 1 hour
    lag: 4 hours
    batch_size: 2 hours
    n_steps: 336 hours or 14 days
    """
    start = 0              # offset into the series, in samples
    window_size = 12       # samples per input window (1 hour)
    n_steps = 4032         # total samples used (14 days)
    use_1st_diffs = True   # append first-difference features to the input
    use_2nd_diffs = False  # append second-difference features to the input
    lag = 48               # samples between input window and target (4 hours)
    batch_size = 24        # samples per step / unroll length (2 hours)
class TestConfig(object):
    """Window configuration for the held-out test split.

    For data sampled in 5 min intervals (12 samples per hour):
    start: 336 hours or 14 days (offset time, right after the training split)
    window_size: 1 hour
    lag: 4 hours
    batch_size: 2 hours
    n_steps: 840 hours or 35 days
    """
    start = 4032           # offset into the series, in samples (14 days)
    window_size = 12       # samples per input window (1 hour)
    n_steps = 10080        # total samples used (35 days)
    use_1st_diffs = True   # append first-difference features to the input
    use_2nd_diffs = False  # append second-difference features to the input
    lag = 48               # samples between input window and target (4 hours)
    batch_size = 24        # samples per step / unroll length (2 hours)
class TrafficRNNConfig(object):
    """Hyper-parameters for the TrafficRNN model.

    The input width is derived from the data configuration: one channel of
    `window_size` raw values, widened by one extra channel per enabled
    difference feature (first and/or second differences).
    """
    max_epoch = 300     # number of training epochs
    num_hidden = 50     # LSTM hidden units
    num_layers = 1      # stacked LSTM layers
    useGDO = False      # True: plain SGD, False: Adam
    max_grad_norm = 3.  # global gradient-clipping norm

    def __init__(self, config):
        self.batch_size = config.batch_size
        # Count feature channels: raw window plus each enabled diff.
        channels = 1
        if config.use_1st_diffs:
            channels += 1
        if config.use_2nd_diffs:
            channels += 1
        self.seq_width = channels * config.window_size
def run_epoch(session, m, data, eval_op, config, writer=None):
    """Run one full pass over `data`, one batch_size chunk at a time.

    Args:
        session: TF session to run the graph in.
        m: model exposing seq_input/seq_target/early_stop placeholders and
            initial_state, final_state, error, output, merge_summaries_op.
        data: dict with 'seq_input', 'seq_target' arrays and 'early_stop'.
        eval_op: extra op run per step (m.train_op to train, tf.no_op() to
            only evaluate).
        config: provides n_steps (total samples) and batch_size (chunk size).
        writer: optional summary writer; receives one summary per chunk.

    Returns:
        (epoch_error, rnn_outs): summed per-chunk error and the concatenated
        network outputs for the whole pass.
    """
    # Thread the RNN state through consecutive chunks of the sequence.
    state = m.initial_state.eval()
    seq_input = data['seq_input']
    seq_target = data['seq_target']
    early_stop = data['early_stop']
    epoch_error = 0.
    rnn_outs = np.array([])
    # BUG FIX: use floor division -- plain `/` is float division under
    # Python 3 and range() would reject the result.
    for i in range(config.n_steps // config.batch_size):
        lo = i * config.batch_size
        hi = lo + config.batch_size
        feed = {m.seq_input: seq_input[lo:hi],
                m.seq_target: seq_target[lo:hi],
                m.early_stop: early_stop,
                m.initial_state: state}
        summary_str, step_error, state, step_outs, _ = session.run(
            [m.merge_summaries_op, m.error, m.final_state, m.output, eval_op],
            feed_dict=feed)
        epoch_error += step_error
        rnn_outs = np.append(rnn_outs, step_outs)
        if writer is not None:
            writer.add_summary(summary_str, i)
    return epoch_error, rnn_outs
def main(unused_args):
tdLoader = TrafficDataLoader('internet-data/data/internet-traffic-11-cities-5min.csv', max_norm=5.)
tdConfig = TrafficDataConfig()
tmConfig = TrafficRNNConfig(tdConfig)
batch_size = tmConfig.batch_size
seq_input, seq_target = tdLoader.get_rnn_input(tdConfig)
print seq_input.shape, seq_target.shape
data = dict()
data['seq_input'] = seq_input
data['seq_target'] = seq_target
data['early_stop'] = tdConfig.batch_size
is_training = True
save_graph = False
with tf.Graph().as_default(), tf.Session() as session:
model = TrafficRNN(is_training=True, config=tmConfig)
saver = tf.train.Saver()
merged = None
writer = None
if is_training and save_graph:
writer = tf.train.SummaryWriter('/tmp/rnn_logs', session.graph_def)
tf.initialize_all_variables().run()
decay = .8
if is_training:
lr_value = 1e-3
for epoch in range(tmConfig.max_epoch):
if epoch > 10:
lr_value = 1e-3
elif epoch > 75:
lr_value = 1e-4
elif epoch > 100:
lr_value = 1e-6
elif epoch > 200:
lr_value = 1e-7
elif epoch > 250:
lr_value = 1e-8
model.assign_lr(session, lr_value)
net_outs_all = np.array([])
error, net_outs_all = run_epoch(session, model, data, model.train_op, tdConfig)
error, net_outs_all = run_epoch(session, model, data, tf.no_op(), tdConfig, writer)
print net_outs_all.shape, seq_target.shape
print ('Epoch %d: %s') % (epoch, error)
if epoch == 0:
plt.figure(1, figsize=(20,10))
plt.ion()
plt.ylim([-1, 6])
plt.plot(xrange(tdConfig.n_steps), seq_target, 'b-', xrange(tdConfig.n_steps), net_outs_all, 'r-')
plt.show()
time.sleep(20)
else:
plt.clf()
plt.ylim([-1, 6])
plt.plot(xrange(tdConfig.n_steps), seq_target, 'b-', xrange(tdConfig.n_steps), net_outs_all, 'r-')
img_loc = 'out-img/epoch-%05d.png' % (epoch)
plt.savefig(img_loc)
plt.draw()
time.sleep(.1)
if epoch > 40 and epoch % 20 == 9:
outfile = 'internet-data/saved-models/traffic-rnn-hid-%d-batch-%d-window-%d-lag-%d.chkpnt' % (tmConfig.num_hidden,
tdConfig.batch_size,
tdConfig.window_size,
tdConfig.lag)
saver.save(session, outfile, global_step=epoch)
else:
saved_vars = 'internet-data/saved-models/traffic-rnn-hid-%d-batch-%d-window-%d-lag-%d.chkpnt-%d' % (tmConfig.num_hidden,
tdConfig.batch_size,
tdConfig.window_size,
tdConfig.lag,
tmConfig.max_epoch-1)
saver.restore(session, saved_vars)
train_error, train_outs_all = run_epoch(session, model, data, tf.no_op(), tdConfig)
testDataConfig = TestConfig()
test_seq_input, test_seq_target = tdLoader.get_rnn_input(testDataConfig)
test_data = dict()
test_outs_all = np.array([])
test_data['seq_input'] = test_seq_input
test_data['seq_target'] = test_seq_target
test_data['early_stop'] = testDataConfig.batch_size
test_error, test_outs_all = run_epoch(session, model, test_data, tf.no_op(), testDataConfig)
upper_curve = test_outs_all + .1*test_outs_all
lower_curve = test_outs_all - .1*test_outs_all
shift_left = np.zeros(test_outs_all.shape)
shift_right = np.zeros(test_outs_all.shape)
shift_left[:-18] = test_outs_all[18:]
shift_right[18:] = test_outs_all[:-18]
curve1 = np.maximum(upper_curve, shift_left)
curve1 = np.maximum(curve1, shift_left)
curve1 = np.maximum(curve1, shift_right)
curve2 = np.minimum(lower_curve, shift_right)
curve2 = np.minimum(curve2, upper_curve)
curve2 = np.minimum(curve2, shift_left)
print test_outs_all.shape
x = xrange(len(test_outs_all))
plt.figure(3, figsize=(20, 10))
plt.ioff()
plt.plot(x, test_outs_all, 'b-', alpha=1)
plt.plot(x, test_seq_target, 'g-', alpha=1)
plt.plot(x, curve1, 'r-', alpha=.1)
plt.plot(x, curve2, 'r-', alpha=.1)
plt.fill_between(x, curve1, curve2, color='grey', alpha=.3)
plt.show()
print 'Test error: %s' % test_error
plt.figure(2, figsize=(20,10))
plt.plot(xrange(tdConfig.n_steps), seq_target, 'b-', xrange(tdConfig.n_steps), train_outs_all, 'g--')
plt.plot(xrange(tdConfig.n_steps-24, tdConfig.n_steps+testDataConfig.n_steps-24), test_seq_target, 'b-')
plt.plot(xrange(tdConfig.n_steps-24, tdConfig.n_steps+testDataConfig.n_steps-24), test_outs_all, 'r--')
plt.show()
time.sleep(1)
# Entry point: tf.app.run() parses command-line flags and invokes main().
if __name__=='__main__':
    tf.app.run()