From 960ab0490bb6e3600b031d0aa5ccfcf754d41915 Mon Sep 17 00:00:00 2001
From: MrStronger <1274552194@qq.com>
Date: Mon, 25 Jun 2018 12:25:11 +0800
Subject: [PATCH] fix version bugs

---
 README.md | 10 ++++++++++
 model.py  |  2 +-
 train.py  |  8 ++++----
 3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 534a842..4d35f70 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,16 @@
 # Autopilot-TensorFlow
 A TensorFlow implementation of this [Nvidia paper](https://arxiv.org/pdf/1604.07316.pdf) with some changes.
 
+# Installation
+Download the latest versions of [python](https://www.python.org/downloads/) and [pip](https://pip.pypa.io/en/stable/installing/).
+
+Install TensorFlow by following the [official instructions](https://www.tensorflow.org/install/).
+
+Use pip to install the remaining packages:
+`pip install scipy Pillow opencv-python`
+
+Activate your TensorFlow environment and enter the project directory.
+
 # How to Use
 Download the [dataset](https://drive.google.com/file/d/0B-KJCaaF7elleG1RbzVPZWV4Tlk/view?usp=sharing) and extract into the repository folder
 
diff --git a/model.py b/model.py
index 0bc4576..84984a8 100644
--- a/model.py
+++ b/model.py
@@ -85,4 +85,4 @@ def conv2d(x, W, stride):
 W_fc5 = weight_variable([10, 1])
 b_fc5 = bias_variable([1])
 
-y = tf.mul(tf.atan(tf.matmul(h_fc4_drop, W_fc5) + b_fc5), 2) #scale the atan output
+y = tf.multiply(tf.atan(tf.matmul(h_fc4_drop, W_fc5) + b_fc5), 2) #scale the atan output
diff --git a/train.py b/train.py
index b1fad04..f21fbd0 100644
--- a/train.py
+++ b/train.py
@@ -12,20 +12,20 @@
 
 train_vars = tf.trainable_variables()
 
-loss = tf.reduce_mean(tf.square(tf.sub(model.y_, model.y))) + tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst
+loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y))) + tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst
 train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
 sess.run(tf.initialize_all_variables())
 
 # create a summary to monitor cost tensor
-tf.scalar_summary("loss", loss)
+tf.summary.scalar("loss", loss)
 # merge all summaries into a single op
-merged_summary_op = tf.merge_all_summaries()
+merged_summary_op = tf.summary.merge_all()
 
 saver = tf.train.Saver(write_version = saver_pb2.SaverDef.V1)
 
 # op to write logs to Tensorboard
 logs_path = './logs'
-summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())
+summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
 
 epochs = 30
 batch_size = 100
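
For reference, here is a minimal standalone sketch (not part of the patch) of the TensorFlow 1.x op names this diff switches to; it assumes a TF 1.x install, and the placeholder tensors are invented purely to exercise the renamed calls:

    import tensorflow as tf

    # Placeholder tensors, invented only for illustration.
    a = tf.placeholder(tf.float32, [None, 1])
    b = tf.placeholder(tf.float32, [None, 1])

    # TF 1.x names used by the patch (0.x names in comments).
    diff = tf.subtract(a, b)                        # was tf.sub
    scaled = tf.multiply(tf.atan(diff), 2)          # was tf.mul
    loss = tf.reduce_mean(tf.square(diff))

    tf.summary.scalar("loss", loss)                 # was tf.scalar_summary
    merged_summary_op = tf.summary.merge_all()      # was tf.merge_all_summaries
    summary_writer = tf.summary.FileWriter(         # was tf.train.SummaryWriter
        "./logs", graph=tf.get_default_graph())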