
Commit 052e5e8
Converted the models repo to TF 1.0 using the upgrade script
1 parent f21c427 · commit 052e5e8

File tree: 71 files changed (+314, -314 lines)
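The renames below are the mechanical output of the TF 1.0 upgrade utility. As a rough sketch from memory (the script location and flags may differ between releases), a whole tree or a single file is converted with:

python tf_upgrade.py --intree models/ --outtree models_tf1/
python tf_upgrade.py --infile old_model.py --outfile new_model.py

Here models_tf1/ and the file names are illustrative output paths, not part of this commit.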


autoencoder/autoencoder_models/Autoencoder.py (+1, -1)

@@ -18,7 +18,7 @@ def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimize
 self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

 # cost
-self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
 self.optimizer = optimizer.minimize(self.cost)

 init = tf.global_variables_initializer()
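These element-wise ops were renamed for consistency in TF 1.0 (tf.sub → tf.subtract, tf.mul → tf.multiply, tf.neg → tf.negative). A minimal runnable sketch of the new spellings, assuming a TF 1.x install; the constants are illustrative:

import tensorflow as tf  # assumes TF 1.x

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])

# Renamed element-wise ops; the old names (tf.sub, tf.mul) were removed in 1.0.
squared_error = tf.reduce_sum(tf.pow(tf.subtract(a, b), 2.0))
scaled = tf.multiply(a, 2.0)

with tf.Session() as sess:
    print(sess.run([squared_error, scaled]))  # 8.0 and [2., 4.]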

autoencoder/autoencoder_models/DenoisingAutoencoder.py (+2, -2)

@@ -22,7 +22,7 @@ def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimi
 self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

 # cost
-self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
 self.optimizer = optimizer.minimize(self.cost)

 init = tf.global_variables_initializer()
@@ -89,7 +89,7 @@ def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimi
 self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

 # cost
-self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
 self.optimizer = optimizer.minimize(self.cost)

 init = tf.global_variables_initializer()

autoencoder/autoencoder_models/VariationalAutoencoder.py (+3, -3)

@@ -17,13 +17,13 @@ def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
 self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])

 # sample from gaussian distribution
-eps = tf.random_normal(tf.pack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
-self.z = tf.add(self.z_mean, tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
+eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
+self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

 self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])

 # cost
-reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
+reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
 latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
                                    - tf.square(self.z_mean)
                                    - tf.exp(self.z_log_sigma_sq), 1)
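tf.pack was renamed to tf.stack in 1.0, alongside tf.mul → tf.multiply. A small sketch of the reparameterization step above with the new names; the placeholder inputs and shapes are illustrative stand-ins, not the model's actual encoder outputs:

import tensorflow as tf  # assumes TF 1.x

n_hidden = 20
x = tf.placeholder(tf.float32, [None, 784])

# Build the dynamic sample shape with tf.stack (formerly tf.pack).
sample_shape = tf.stack([tf.shape(x)[0], n_hidden])
z_mean = tf.zeros(sample_shape)           # stand-in for the encoder mean
z_log_sigma_sq = tf.zeros(sample_shape)   # stand-in for the encoder log-variance

# Reparameterization: z = mean + sigma * eps, eps ~ N(0, 1); tf.multiply was tf.mul.
eps = tf.random_normal(sample_shape, 0, 1, dtype=tf.float32)
z = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_log_sigma_sq)), eps))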

compression/decoder.py — file mode changed (100755 → 100644)

compression/encoder.py — file mode changed (100755 → 100644)

compression/msssim.py — file mode changed (100755 → 100644)

differential_privacy/dp_sgd/dp_mnist/dp_mnist.py (+2, -2)

@@ -273,7 +273,7 @@ def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,
 images, network_parameters)

 cost = tf.nn.softmax_cross_entropy_with_logits(
-    logits, tf.one_hot(labels, 10))
+    logits=logits, labels=tf.one_hot(labels, 10))

 # The actual cost is the average across the examples.
 cost = tf.reduce_sum(cost, [0]) / batch_size
@@ -343,7 +343,7 @@ def Train(mnist_train_file, mnist_test_file, network_parameters, num_steps,

 # We need to maintain the intialization sequence.
 for v in tf.trainable_variables():
-  sess.run(tf.initialize_variables([v]))
+  sess.run(tf.variables_initializer([v]))
 sess.run(tf.global_variables_initializer())
 sess.run(init_ops)
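Two unrelated 1.0 changes meet in this file: tf.nn.softmax_cross_entropy_with_logits now rejects positional arguments (so logits and labels cannot be silently swapped), and tf.initialize_variables became tf.variables_initializer. A minimal sketch, with toy logits and labels standing in for the model's outputs:

import tensorflow as tf  # assumes TF 1.x

logits = tf.get_variable("logits", shape=[4, 10], initializer=tf.zeros_initializer())
labels = tf.one_hot([1, 2, 3, 4], depth=10)

# Keyword arguments are mandatory in TF 1.0; a positional call raises an error.
cost = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)

with tf.Session() as sess:
    for v in tf.trainable_variables():
        sess.run(tf.variables_initializer([v]))  # was tf.initialize_variables
    print(sess.run(tf.reduce_mean(cost)))        # ~2.3026 for all-zero logits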

differential_privacy/dp_sgd/dp_optimizer/utils.py (+2, -2)

@@ -236,7 +236,7 @@ def BatchClipByL2norm(t, upper_bound, name=None):
 with tf.op_scope([t, upper_bound], name, "batch_clip_by_l2norm") as name:
 saved_shape = tf.shape(t)
 batch_size = tf.slice(saved_shape, [0], [1])
-t2 = tf.reshape(t, tf.concat(0, [batch_size, [-1]]))
+t2 = tf.reshape(t, tf.concat(axis=0, values=[batch_size, [-1]]))
 upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),
                           tf.constant(1.0/upper_bound))
 # Add a small number to avoid divide by 0
@@ -266,7 +266,7 @@ def SoftThreshold(t, threshold_ratio, name=None):
 assert threshold_ratio >= 0
 with tf.op_scope([t, threshold_ratio], name, "soft_thresholding") as name:
 saved_shape = tf.shape(t)
-t2 = tf.reshape(t, tf.concat(0, [tf.slice(saved_shape, [0], [1]), -1]))
+t2 = tf.reshape(t, tf.concat(axis=0, values=[tf.slice(saved_shape, [0], [1]), -1]))
 t_abs = tf.abs(t2)
 t_x = tf.sign(t2) * tf.nn.relu(t_abs -
                                (tf.reduce_mean(t_abs, [0],
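tf.concat flipped its argument order in 1.0, from tf.concat(concat_dim, values) to tf.concat(values, axis); the upgrade script rewrites old calls into keyword form, which is unambiguous under either order. A small sketch of the reshape idiom above, with an illustrative tensor:

import tensorflow as tf  # assumes TF 1.x

t = tf.ones([4, 3, 2])
saved_shape = tf.shape(t)
batch_size = tf.slice(saved_shape, [0], [1])

# Old: tf.concat(0, [batch_size, [-1]]); the new signature is tf.concat(values, axis).
new_shape = tf.concat(axis=0, values=[batch_size, [-1]])
t2 = tf.reshape(t, new_shape)  # flattens everything but the batch dimension

with tf.Session() as sess:
    print(sess.run(tf.shape(t2)))  # [4 6]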

differential_privacy/dp_sgd/per_example_gradients/per_example_gradients.py (+3, -3)

@@ -189,7 +189,7 @@ def __call__(self, x, z_grads):
 z_grads, = z_grads
 x_expanded = tf.expand_dims(x, 2)
 z_grads_expanded = tf.expand_dims(z_grads, 1)
-return tf.mul(x_expanded, z_grads_expanded)
+return tf.multiply(x_expanded, z_grads_expanded)


 pxg_registry.Register("MatMul", MatMulPXG)
@@ -245,7 +245,7 @@ def _PxConv2DBuilder(self, input_, w, strides, padding):
 num_x = int(conv_x.get_shape()[0])
 assert num_x == 1, num_x
 assert len(conv_px) == batch_size
-conv = tf.concat(0, conv_px)
+conv = tf.concat(axis=0, values=conv_px)
 assert int(conv.get_shape()[0]) == batch_size
 return conv, w_px

@@ -274,7 +274,7 @@ def __call__(self, w, z_grads):
 self.colocate_gradients_with_ops,
 gate_gradients=self.gate_gradients)

-return tf.pack(gradients_list)
+return tf.stack(gradients_list)

 pxg_registry.Register("Conv2D", Conv2DPXG)

differential_privacy/multiple_teachers/deep_cnn.py (+4, -4)

@@ -75,7 +75,7 @@ def _variable_with_weight_decay(name, shape, stddev, wd):
 var = _variable_on_cpu(name, shape,
                        tf.truncated_normal_initializer(stddev=stddev))
 if wd is not None:
-  weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
+  weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
 tf.add_to_collection('losses', weight_decay)
 return var

@@ -398,7 +398,7 @@ def train_op_fun(total_loss, global_step):
 decay_steps,
 LEARNING_RATE_DECAY_FACTOR,
 staircase=True)
-tf.scalar_summary('learning_rate', lr)
+tf.summary.scalar('learning_rate', lr)

 # Generate moving averages of all losses and associated summaries.
 loss_averages_op = moving_av(total_loss)
@@ -413,7 +413,7 @@ def train_op_fun(total_loss, global_step):

 # Add histograms for trainable variables.
 for var in tf.trainable_variables():
-  tf.histogram_summary(var.op.name, var)
+  tf.summary.histogram(var.op.name, var)

 # Track the moving averages of all trainable variables.
 variable_averages = tf.train.ExponentialMovingAverage(
@@ -485,7 +485,7 @@ def train(images, labels, ckpt_path, dropout=False):
 train_op = train_op_fun(loss, global_step)

 # Create a saver.
-saver = tf.train.Saver(tf.all_variables())
+saver = tf.train.Saver(tf.global_variables())

 print("Graph constructed and saver created")
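The summary ops were consolidated under tf.summary in 1.0 (tf.scalar_summary → tf.summary.scalar, tf.histogram_summary → tf.summary.histogram), and tf.all_variables became tf.global_variables. A minimal sketch of the new spellings, with an illustrative variable and learning rate:

import tensorflow as tf  # assumes TF 1.x

lr = tf.constant(0.1)
var = tf.get_variable("weights", shape=[10],
                      initializer=tf.truncated_normal_initializer(stddev=0.1))

tf.summary.scalar('learning_rate', lr)   # was tf.scalar_summary
tf.summary.histogram(var.op.name, var)   # was tf.histogram_summary

# Savers now enumerate tf.global_variables(); tf.all_variables() was deprecated.
saver = tf.train.Saver(tf.global_variables())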

differential_privacy/privacy_accountant/tf/accountant.py (+2, -2)

@@ -361,12 +361,12 @@ def _differential_moments(self, sigma, s, t):
 exponents = tf.constant([j * (j + 1.0 - 2.0 * s) / (2.0 * sigma * sigma)
                          for j in range(t + 1)], dtype=tf.float64)
 # x[i, j] = binomial[i, j] * signs[i, j] = (i choose j) * (-1)^{i-j}
-x = tf.mul(binomial, signs)
+x = tf.multiply(binomial, signs)
 # y[i, j] = x[i, j] * exp(exponents[j])
 #         = (i choose j) * (-1)^{i-j} * exp(j(j-1)/(2 sigma^2))
 # Note: this computation is done by broadcasting pointwise multiplication
 # between [t+1, t+1] tensor and [t+1] tensor.
-y = tf.mul(x, tf.exp(exponents))
+y = tf.multiply(x, tf.exp(exponents))
 # z[i] = sum_j y[i, j]
 #      = sum_j (i choose j) * (-1)^{i-j} * exp(j(j-1)/(2 sigma^2))
 z = tf.reduce_sum(y, 1)

im2txt/im2txt/show_and_tell_model.py (+3, -3)

@@ -264,7 +264,7 @@ def build_model(self):
 if self.mode == "inference":
   # In inference mode, use concatenated states for convenient feeding and
   # fetching.
-  tf.concat(initial_state, 1, name="initial_state")
+  tf.concat(axis=initial_state, values=1, name="initial_state")

   # Placeholder for feeding a batch of concatenated states.
   state_feed = tf.placeholder(dtype=tf.float32,
@@ -274,11 +274,11 @@ def build_model(self):

   # Run a single LSTM step.
   lstm_outputs, state_tuple = lstm_cell(
-      inputs=tf.squeeze(self.seq_embeddings, squeeze_dims=[1]),
+      inputs=tf.squeeze(self.seq_embeddings, axis=[1]),
       state=state_tuple)

   # Concatentate the resulting state.
-  tf.concat(state_tuple, 1, name="state")
+  tf.concat(axis=state_tuple, values=1, name="state")
 else:
   # Run the batch of sequence embeddings through the LSTM.
   sequence_length = tf.reduce_sum(self.input_mask, 1)
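For reference, the 1.0 signature is tf.concat(values, axis, name=None), so the converted calls above end up passing the state tensors as axis and the integer 1 as values. A hedged sketch of concatenating an LSTM state tuple along dimension 1 with the new signature; the cell and batch size are illustrative, not im2txt's actual configuration:

import tensorflow as tf  # assumes TF 1.x

# Illustrative LSTM cell; its zero state is a (c, h) tuple of [batch, num_units] tensors.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=8)
initial_state = lstm_cell.zero_state(batch_size=2, dtype=tf.float32)

# tf.concat(values, axis, name): concatenate c and h along dimension 1 -> shape [2, 16].
tf.concat(values=initial_state, axis=1, name="initial_state")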

inception/inception/data/preprocess_imagenet_validation_data.py — file mode changed (100755 → 100644)

inception/inception/data/process_bounding_boxes.py — file mode changed (100755 → 100644)

inception/inception/image_processing.py (+8, -8)

@@ -221,7 +221,7 @@ def distort_image(image, height, width, bbox, thread_id=0, scope=None):
 if not thread_id:
   image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                                 bbox)
-  tf.image_summary('image_with_bounding_boxes', image_with_box)
+  tf.summary.image('image_with_bounding_boxes', image_with_box)

 # A large fraction of image datasets contain a human-annotated bounding
 # box delineating the region of the image containing the object of interest.
@@ -242,7 +242,7 @@ def distort_image(image, height, width, bbox, thread_id=0, scope=None):
 if not thread_id:
   image_with_distorted_box = tf.image.draw_bounding_boxes(
       tf.expand_dims(image, 0), distort_bbox)
-  tf.image_summary('images_with_distorted_bounding_box',
+  tf.summary.image('images_with_distorted_bounding_box',
                    image_with_distorted_box)

 # Crop the image to the specified bounding box.
@@ -259,7 +259,7 @@ def distort_image(image, height, width, bbox, thread_id=0, scope=None):
 # the third dimension.
 distorted_image.set_shape([height, width, 3])
 if not thread_id:
-  tf.image_summary('cropped_resized_image',
+  tf.summary.image('cropped_resized_image',
                    tf.expand_dims(distorted_image, 0))

 # Randomly flip the image horizontally.
@@ -269,7 +269,7 @@ def distort_image(image, height, width, bbox, thread_id=0, scope=None):
 distorted_image = distort_color(distorted_image, thread_id)

 if not thread_id:
-  tf.image_summary('final_distorted_image',
+  tf.summary.image('final_distorted_image',
                    tf.expand_dims(distorted_image, 0))
 return distorted_image

@@ -328,8 +328,8 @@ def image_preprocessing(image_buffer, bbox, train, thread_id=0):
 image = eval_image(image, height, width)

 # Finally, rescale to [-1,1] instead of [0, 1)
-image = tf.sub(image, 0.5)
-image = tf.mul(image, 2.0)
+image = tf.subtract(image, 0.5)
+image = tf.multiply(image, 2.0)
 return image


@@ -394,7 +394,7 @@ def parse_example_proto(example_serialized):
 ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)

 # Note that we impose an ordering of (y, x) just to make life difficult.
-bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
+bbox = tf.concat(axis=0, values=[ymin, xmin, ymax, xmax])

 # Force the variable number of bounding boxes into the shape
 # [1, num_boxes, coords].
@@ -505,6 +505,6 @@ def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
 images = tf.reshape(images, shape=[batch_size, height, width, depth])

 # Display the training images in the visualizer.
-tf.image_summary('images', images)
+tf.summary.image('images', images)

 return images, tf.reshape(label_index_batch, [batch_size])

inception/inception/inception_distributed_train.py (+6, -6)

@@ -133,7 +133,7 @@ def train(target, dataset, cluster_spec):
 FLAGS.learning_rate_decay_factor,
 staircase=True)
 # Add a summary to track the learning rate.
-tf.scalar_summary('learning_rate', lr)
+tf.summary.scalar('learning_rate', lr)

 # Create an optimizer that performs gradient descent.
 opt = tf.train.RMSPropOptimizer(lr,
@@ -171,8 +171,8 @@ def train(target, dataset, cluster_spec):
 loss_name = l.op.name
 # Name each loss as '(raw)' and name the moving average version of the
 # loss as the original loss name.
-tf.scalar_summary(loss_name + ' (raw)', l)
-tf.scalar_summary(loss_name, loss_averages.average(l))
+tf.summary.scalar(loss_name + ' (raw)', l)
+tf.summary.scalar(loss_name, loss_averages.average(l))

 # Add dependency to compute loss_averages.
 with tf.control_dependencies([loss_averages_op]):
@@ -191,7 +191,7 @@ def train(target, dataset, cluster_spec):

 # Add histograms for model variables.
 for var in variables_to_average:
-  tf.histogram_summary(var.op.name, var)
+  tf.summary.histogram(var.op.name, var)

 # Create synchronous replica optimizer.
 opt = tf.train.SyncReplicasOptimizer(
@@ -215,7 +215,7 @@ def train(target, dataset, cluster_spec):
 # Add histograms for gradients.
 for grad, var in grads:
   if grad is not None:
-    tf.histogram_summary(var.op.name + '/gradients', grad)
+    tf.summary.histogram(var.op.name + '/gradients', grad)

 apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)

@@ -233,7 +233,7 @@ def train(target, dataset, cluster_spec):
 saver = tf.train.Saver()

 # Build the summary operation based on the TF collection of Summaries.
-summary_op = tf.merge_all_summaries()
+summary_op = tf.summary.merge_all()

 # Build an initialization operation to run below.
 init_op = tf.global_variables_initializer()

inception/inception/inception_eval.py (+2, -2)

@@ -158,10 +158,10 @@ def evaluate(dataset):
 saver = tf.train.Saver(variables_to_restore)

 # Build the summary operation based on the TF collection of Summaries.
-summary_op = tf.merge_all_summaries()
+summary_op = tf.summary.merge_all()

 graph_def = tf.get_default_graph().as_graph_def()
-summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
+summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                         graph_def=graph_def)

 while True:
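tf.merge_all_summaries and tf.train.SummaryWriter likewise moved into tf.summary, as merge_all and FileWriter. A minimal sketch; the log directory and the scalar being summarized are illustrative:

import tensorflow as tf  # assumes TF 1.x

tf.summary.scalar('loss', tf.constant(0.0))

summary_op = tf.summary.merge_all()          # was tf.merge_all_summaries()
graph_def = tf.get_default_graph().as_graph_def()
summary_writer = tf.summary.FileWriter('/tmp/eval_dir',  # was tf.train.SummaryWriter
                                       graph_def=graph_def)

with tf.Session() as sess:
    summary_writer.add_summary(sess.run(summary_op), global_step=0)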

inception/inception/inception_model.py (+3, -3)

@@ -115,7 +115,7 @@ def loss(logits, labels, batch_size=None):
 # shape [FLAGS.batch_size, num_classes].
 sparse_labels = tf.reshape(labels, [batch_size, 1])
 indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
-concated = tf.concat(1, [indices, sparse_labels])
+concated = tf.concat(axis=1, values=[indices, sparse_labels])
 num_classes = logits[0].get_shape()[-1].value
 dense_labels = tf.sparse_to_dense(concated,
                                   [batch_size, num_classes],
@@ -147,8 +147,8 @@ def _activation_summary(x):
 # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
 # session. This helps the clarity of presentation on tensorboard.
 tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
-tf.contrib.deprecated.histogram_summary(tensor_name + '/activations', x)
-tf.contrib.deprecated.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
+tf.summary.histogram(tensor_name + '/activations', x)
+tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))


 def _activation_summaries(endpoints):
