diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..3999087
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..a476500
diff --git a/.idea/text_classification.iml b/.idea/text_classification.iml
new file mode 100644
index 0000000..6711606
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..94a25f7
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
new file mode 100644
index 0000000..8b5dc32
diff --git a/cnn_classification/cnn.py b/cnn_classification/cnn.py
index b68d966..19d69cd 100644
--- a/cnn_classification/cnn.py
+++ b/cnn_classification/cnn.py
@@ -5,7 +5,7 @@
class TextCNN(object):
"""
A CNN class for sentence classification
- With a embedding layer + a convolutional, max-pooling and softmax layer
+ With an embedding layer + a convolutional, max-pooling and softmax layer
"""
def __init__(self, sequence_length, num_classes, vocab_size,
embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
diff --git a/cnn_classification/train.py b/cnn_classification/train.py
index 76c746e..7dbb566 100644
--- a/cnn_classification/train.py
+++ b/cnn_classification/train.py
@@ -76,11 +76,11 @@ def train(x_train, y_train, vocab_processor, x_dev, y_dev):
# initialize cnn
cnn = TextCNN(sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
- vocab_size= len(vocab_processor.vocabulary_),
+ vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
- filter_sizes= list(map(int, FLAGS.filter_sizes.split(','))),
- num_filters= FLAGS.num_filters,
- l2_reg_lambda= FLAGS.l2_reg_lambda)
+ filter_sizes=list(map(int, FLAGS.filter_sizes.split(','))),
+ num_filters=FLAGS.num_filters,
+ l2_reg_lambda=FLAGS.l2_reg_lambda)
# define training procedure
global_step = tf.Variable(0, name='global_step', trainable=False)
diff --git a/.gitignore b/rnn_classification/.gitignore
similarity index 53%
rename from .gitignore
rename to rnn_classification/.gitignore
index 11614af..2fc467b 100644
--- a/.gitignore
+++ b/rnn_classification/.gitignore
@@ -1,3 +1,9 @@
+*.npy
+runs/
+
+# Created by https://www.gitignore.io/api/python,ipythonnotebook
+
+### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -8,6 +14,7 @@ __pycache__/
# Distribution / packaging
.Python
+env/
build/
develop-eggs/
dist/
@@ -19,12 +26,9 @@ lib64/
parts/
sdist/
var/
-wheels/
-share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
-MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
@@ -39,15 +43,12 @@ pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
-.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
-*.cover
-.hypothesis/
-.pytest_cache/
+*,cover
# Translations
*.mo
@@ -55,15 +56,6 @@ coverage.xml
# Django stuff:
*.log
-local_settings.py
-db.sqlite3
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
# Sphinx documentation
docs/_build/
@@ -71,45 +63,7 @@ docs/_build/
# PyBuilder
target/
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-.python-version
-
-# celery beat schedule file
-celerybeat-schedule
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-# Pyre type checker
-.pyre/
+### IPythonNotebook ###
+# Temporary data
+.ipynb_checkpoints/
diff --git a/rnn_classification/BaseUtil/BaseModel.py b/rnn_classification/BaseUtil/BaseModel.py
new file mode 100644
index 0000000..8939162
--- /dev/null
+++ b/rnn_classification/BaseUtil/BaseModel.py
@@ -0,0 +1,24 @@
+import abc
+
+class BaseModel(object):
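+    """Abstract interface that concrete text-classification models implement."""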
+    __metaclass__ = abc.ABCMeta
+
+ @abc.abstractmethod
+ def instantiate_weight(self):
+ return
+
+
+ @abc.abstractmethod
+ def inference(self):
+ return
+
+
+ @abc.abstractmethod
+ def loss(self):
+ return
+
+
+ @abc.abstractmethod
+ def train(self):
+ return
+
diff --git a/rnn_classification/BaseUtil/DataUtil.py b/rnn_classification/BaseUtil/DataUtil.py
new file mode 100644
index 0000000..e69de29
diff --git a/rnn_classification/eval.py b/rnn_classification/eval.py
new file mode 100644
index 0000000..e69de29
diff --git a/rnn_classification/rnn.py b/rnn_classification/rnn.py
new file mode 100644
index 0000000..135995e
--- /dev/null
+++ b/rnn_classification/rnn.py
@@ -0,0 +1,129 @@
+import tensorflow as tf
+import numpy as np
+from tensorflow.contrib import rnn
+from BaseUtil.BaseModel import BaseModel
+
+
+class RNN(BaseModel):
+ """
+    An RNN class for sentence classification
+ With an embedding layer + Bi-LSTM layer + FC layer + softmax
+ """
+ def __init__(self, sequence_length, num_classes, vocab_size,
+ embed_size, learning_rate, decay_steps, decay_rate,
+ hidden_size, is_training, l2_lambda, grad_clip,
+ initializer=tf.random_normal_initializer(stddev=0.1)):
+ """
+ :param sequence_length:
+ :param num_classes:
+ :param vocab_size:
+        :param embed_size:
+ :param learning_rate:
+ :param decay_steps:
+ :param decay_rate:
+ :param hidden_size:
+ :param is_training:
+ :param l2_lambda:
+ :param grad_clip:
+ :param initializer:
+ """
+ self.num_classes = num_classes
+ self.learning_rate = learning_rate
+ self.decay_steps = decay_steps
+ self.decay_rate = decay_rate
+ self.sequence_length = sequence_length
+ self.vocab_size = vocab_size
+ self.embed_size = embed_size
+ self.hidden_size = hidden_size
+ self.is_training = is_training
+ self.l2_lambda = l2_lambda
+ self.grad_clip = grad_clip
+ self.initializer = initializer
+
+ # define placeholder
+ self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name='input_x')
+ self.input_y = tf.placeholder(tf.float32, [None, num_classes], name='input_y')
+ self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
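+        # dropout_keep_prob is fed via feed_dict: FLAGS.dropout_keep_prob during training, 1.0 during evaluation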
+
+ self.global_step = tf.Variable(0, name='global_step', trainable=False)
+ self.epoch_step = tf.Variable(0, name='epoch_step', trainable=False)
+ self.epoch_increment = tf.assign(self.epoch_step, tf.add(self.epoch_step, tf.constant(1)))
+
+ self.instantiate_weight()
+ self.logits = self.inference()
+
+ self.loss_val = self.loss()
+ self.train_op = self.train()
+ self.predictions = tf.argmax(self.logits, axis=1, name='predictions')
+
+ correct_prediction = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
+ self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'), name='accuracy')
+
+
+ def instantiate_weight(self):
+ """define all the weights"""
+ with tf.name_scope('weights'):
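+            # Embedding is the word-vector lookup table; W_projection/b_projection map the 2*hidden_size Bi-LSTM features to class logits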
+            self.Embedding = tf.get_variable('Embedding', shape=[self.vocab_size, self.embed_size],
+ initializer=self.initializer)
+ self.W_projection = tf.get_variable('W_projection', shape=[self.hidden_size * 2, self.num_classes],
+ initializer=self.initializer)
+ self.b_projection = tf.get_variable('b_projection', shape=[self.num_classes])
+
+
+ def inference(self):
+ """
+ 1. embedding layer
+ 2. Bi-LSTM layer
+ 3. concat Bi-LSTM output
+        4. FC (fully connected) layer
+ 5. softmax layer
+ """
+ # embedding layer
+ with tf.name_scope('embedding'):
+ self.embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)
+
+ # Bi-LSTM layer
+ with tf.name_scope('Bi-LSTM'):
+ lstm_fw_cell = rnn.BasicLSTMCell(self.hidden_size)
+ lstm_bw_cell = rnn.BasicLSTMCell(self.hidden_size)
+
+ if self.dropout_keep_prob is not None:
+ lstm_fw_cell = rnn.DropoutWrapper(lstm_fw_cell, output_keep_prob=self.dropout_keep_prob)
+ lstm_bw_cell = rnn.DropoutWrapper(lstm_bw_cell, output_keep_prob=self.dropout_keep_prob)
+
+ outputs, output_states = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell,
+ self.embedded_words,
+ dtype=tf.float32)
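+            # outputs is a (forward, backward) tuple of [batch, time, hidden_size] tensors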
+ output = tf.concat(outputs, axis=2)
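+            # average over the time axis to get a fixed-size sentence representation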
+ output_last = tf.reduce_mean(output, axis=1)
+
+ # FC layer
+ with tf.name_scope('output'):
+ self.score = tf.matmul(output_last, self.W_projection) + self.b_projection
+ return self.score
+
+ def loss(self):
+ # loss
+ with tf.name_scope('loss'):
+ losses = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.score)
+ data_loss = tf.reduce_mean(losses)
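+            # L2-regularize all trainable variables except the biases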
+ l2_loss = tf.add_n([tf.nn.l2_loss(cand_v) for cand_v in tf.trainable_variables()
+ if 'bias' not in cand_v.name]) * self.l2_lambda
+ data_loss += l2_loss
+ return data_loss
+
+ def train(self):
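+        # decay the learning rate every decay_steps by decay_rate (staircase schedule)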
+ learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step,
+ self.decay_steps, self.decay_rate, staircase=True)
+ optimizer = tf.train.AdamOptimizer(learning_rate)
+ grads_and_vars = optimizer.compute_gradients(self.loss_val)
+
+ #for idx, (grad, var) in enumerate(grads_and_vars):
+ #if grad is not None:
+ #grads_and_vars[idx] = (tf.clip_by_global_norm(grad, self.grad_clip), var)
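+        # clip each gradient's norm to grad_clip to guard against exploding gradients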
+ grads_and_vars = [(tf.clip_by_norm(grad, self.grad_clip), val) for grad, val in grads_and_vars]
+ #grads_and_vars = [(tf.add(grad, tf.random_normal(tf.shape(grad), stddev=self.config.grad_noise)), val) for grad, val in
+ #gvs]
+ train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)
+ return train_op
+
diff --git a/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/checkpoint b/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/checkpoint
new file mode 100644
index 0000000..50246d3
--- /dev/null
+++ b/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/checkpoint
@@ -0,0 +1,2 @@
+model_checkpoint_path: "D:\\NLP\\NLP_projects\\text_classification\\rnn_classification\\run\\1542522367.8184037\\text_rnn_checkpoint\\model-5760"
+all_model_checkpoint_paths: "D:\\NLP\\NLP_projects\\text_classification\\rnn_classification\\run\\1542522367.8184037\\text_rnn_checkpoint\\model-5760"
diff --git a/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/model-5760.data-00000-of-00001 b/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/model-5760.data-00000-of-00001
new file mode 100644
index 0000000..3ce2de8
Binary files /dev/null and b/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/model-5760.data-00000-of-00001 differ
diff --git a/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/model-5760.index b/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/model-5760.index
new file mode 100644
index 0000000..5335536
Binary files /dev/null and b/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/model-5760.index differ
diff --git a/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/model-5760.meta b/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/model-5760.meta
new file mode 100644
index 0000000..d29e48e
Binary files /dev/null and b/rnn_classification/run/1542522367.8184037/text_rnn_checkpoint/model-5760.meta differ
diff --git a/rnn_classification/run/1542522367.8184037/vocab b/rnn_classification/run/1542522367.8184037/vocab
new file mode 100644
index 0000000..4e8ae01
Binary files /dev/null and b/rnn_classification/run/1542522367.8184037/vocab differ
diff --git a/rnn_classification/train.py b/rnn_classification/train.py
new file mode 100644
index 0000000..c9f00c3
--- /dev/null
+++ b/rnn_classification/train.py
@@ -0,0 +1,185 @@
+import tensorflow as tf
+import numpy as np
+import os
+import time
+import datetime
+from cnn_classification import data_process
+from rnn import RNN
+from tensorflow.contrib import learn
+
+
+# define parameters
+
+#data load params
+tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
+tf.flags.DEFINE_string("positive_data_file", "../cnn_classification/data/rt-polarity.pos", "Data source for the positive data.")
+tf.flags.DEFINE_string("negative_data_file", "../cnn_classification/data/rt-polarity.neg", "Data source for the negative data.")
+
+#configuration
+tf.flags.DEFINE_float("learning_rate", 0.01, "learning rate")
+tf.flags.DEFINE_integer("num_epochs", 60, "Number of training epochs")
+tf.flags.DEFINE_integer("batch_size", 100, "Batch size for training/evaluating.")  # batch size (32 --> 128)
+
+tf.flags.DEFINE_integer("decay_steps", 12000, "how many steps before decay learning rate.")
+tf.flags.DEFINE_float("decay_rate", 0.9, "Rate of decay for learning rate.")  # how much to decay each time (e.g. 0.5)
+
+tf.flags.DEFINE_string("ckpt_dir", "text_rnn_checkpoint/", "checkpoint location for the model")
+tf.flags.DEFINE_integer('num_checkpoints', 10, 'number of checkpoints to keep')
+
+tf.flags.DEFINE_integer("sequence_length", 300, "max sentence length")
+tf.flags.DEFINE_integer("embed_size", 128, "embedding size")
+tf.flags.DEFINE_integer('hidden_size', 128, 'cell output size')
+
+tf.flags.DEFINE_boolean("is_training", True, "is training. true: training, false: testing/inference")
+
+tf.flags.DEFINE_integer("validate_every", 5, "Validate every validate_every training steps.")
+# tf.flags.DEFINE_float('validation_percentage',0.1,'validat data percentage in train data')
+tf.flags.DEFINE_integer('dev_sample_max_cnt', 1000, 'max number of validation samples; too many dev samples cause a heavy evaluation load')
+
+tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
+
+tf.flags.DEFINE_float("l2_reg_lambda", 0.0001, "L2 regularization lambda (default: 0.0)")
+
+tf.flags.DEFINE_float('grad_clip', 5.0, 'grad_clip')
+
+tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
+tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
+
+FLAGS = tf.flags.FLAGS
+
+def preprocess():
+    """
+    Load and preprocess the data
+ :return:
+ """
+ print("Loading data...")
+ x_text, y = data_process.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
+    # build vocabulary
+ max_document_length = max(len(x.split(' ')) for x in x_text)
+ vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
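+    # map each token to an integer id and pad every sentence to max_document_length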
+ x = np.array(list(vocab_processor.fit_transform(x_text)))
+
+ # shuffle
+ np.random.seed(10)
+ shuffle_indices = np.random.permutation(np.arange(len(y)))
+ x_shuffled = x[shuffle_indices]
+ y_shuffled = y[shuffle_indices]
+
+ # split train/test dataset
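+    # the last dev_sample_percentage of the shuffled data becomes the dev set (negative index slices from the end)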
+ dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
+ x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
+ y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
+ del x, y, x_shuffled, y_shuffled
+
+ print('Vocabulary Size: {:d}'.format(len(vocab_processor.vocabulary_)))
+ print('Train/Dev split: {:d}/{:d}'.format(len(y_train), len(y_dev)))
+ return x_train, y_train, vocab_processor, x_dev, y_dev
+
+def train(x_train, y_train, vocab_processor, x_dev, y_dev):
+ with tf.Graph().as_default():
+ session_conf = tf.ConfigProto(
+ # allows TensorFlow to fall back on a device with a certain operation implemented
+            allow_soft_placement=FLAGS.allow_soft_placement,
+ # allows TensorFlow log on which devices (CPU or GPU) it places operations
+ log_device_placement=FLAGS.log_device_placement
+ )
+ sess = tf.Session(config=session_conf)
+ with sess.as_default():
+            # initialize rnn
+ rnn = RNN(sequence_length=x_train.shape[1],
+ num_classes=y_train.shape[1],
+ vocab_size=len(vocab_processor.vocabulary_),
+ embed_size=FLAGS.embed_size,
+ l2_lambda=FLAGS.l2_reg_lambda,
+ is_training=True,
+ grad_clip=FLAGS.grad_clip,
+ learning_rate=FLAGS.learning_rate,
+ decay_steps=FLAGS.decay_steps,
+ decay_rate=FLAGS.decay_rate,
+ hidden_size=FLAGS.hidden_size
+ )
+
+
+ # output dir for models and summaries
+ timestamp = str(time.time())
+ out_dir = os.path.abspath(os.path.join(os.path.curdir, 'run', timestamp))
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ print('Writing to {} \n'.format(out_dir))
+
+            # checkpoint dir: checkpointing saves the model parameters so they can be restored later on.
+ checkpoint_dir = os.path.abspath(os.path.join(out_dir, FLAGS.ckpt_dir))
+ checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
+ if not os.path.exists(checkpoint_dir):
+ os.makedirs(checkpoint_dir)
+ saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
+
+ # Write vocabulary
+ vocab_processor.save(os.path.join(out_dir, 'vocab'))
+
+ # Initialize all
+ sess.run(tf.global_variables_initializer())
+
+
+ def train_step(x_batch, y_batch):
+ """
+ A single training step
+ :param x_batch:
+ :param y_batch:
+ :return:
+ """
+ feed_dict = {
+ rnn.input_x: x_batch,
+ rnn.input_y: y_batch,
+ rnn.dropout_keep_prob: FLAGS.dropout_keep_prob
+ }
+ _, step, loss, accuracy = sess.run(
+ [rnn.train_op, rnn.global_step, rnn.loss_val, rnn.accuracy],
+ feed_dict=feed_dict
+ )
+ time_str = datetime.datetime.now().isoformat()
+ print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
+
+
+ def dev_step(x_batch, y_batch):
+ """
+ Evaluate model on a dev set
+ Disable dropout
+ :param x_batch:
+ :param y_batch:
+ :return:
+ """
+ feed_dict = {
+ rnn.input_x: x_batch,
+ rnn.input_y: y_batch,
+ rnn.dropout_keep_prob: 1.0
+ }
+ step, loss, accuracy = sess.run(
+ [rnn.global_step, rnn.loss_val, rnn.accuracy],
+ feed_dict=feed_dict
+ )
+ time_str = datetime.datetime.now().isoformat()
+            print("dev results: {}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
+
+ # generate batches
+ batches = data_process.batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
+ # training loop
+ for batch in batches:
+ x_batch, y_batch = zip(*batch)
+ train_step(x_batch, y_batch)
+ current_step = tf.train.global_step(sess, rnn.global_step)
+ if current_step % FLAGS.validate_every == 0:
+ print('\n Evaluation:')
+ dev_step(x_dev, y_dev)
+ print('')
+
+ path = saver.save(sess, checkpoint_prefix, global_step=current_step)
+                print('Saved model checkpoint to {}\n'.format(path))
+
+def main(argv=None):
+    x_train, y_train, vocab_processor, x_dev, y_dev = preprocess()
+ train(x_train, y_train, vocab_processor, x_dev, y_dev)
+
+if __name__ == '__main__':
+ tf.app.run()
\ No newline at end of file