From abe7bbb3cb92d15a14559fd3b3dd5bce0e8f5464 Mon Sep 17 00:00:00 2001
From: attardi
Date: Thu, 11 Apr 2019 11:24:16 +0200
Subject: [PATCH 1/3] Fix to pickle read.

---
 barchybrid/src/parser.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/barchybrid/src/parser.py b/barchybrid/src/parser.py
index e2d60bf..d7dd0f4 100644
--- a/barchybrid/src/parser.py
+++ b/barchybrid/src/parser.py
@@ -95,7 +95,7 @@ def run(experiment,options):
 
     params = os.path.join(experiment.modeldir,options.params)
     print('Reading params from ' + params)
-    with open(params, 'r') as paramsfp:
+    with open(params, 'rb') as paramsfp:
         stored_vocab, stored_opt = pickle.load(paramsfp)
 
     # we need to update/add certain options based on new user input

From 810b4c26e289bc5abcb21613596a120d1b984a7a Mon Sep 17 00:00:00 2001
From: attardi
Date: Sat, 13 Apr 2019 16:46:47 +0200
Subject: [PATCH 2/3] print

---
 barchybrid/src/elmo.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/barchybrid/src/elmo.py b/barchybrid/src/elmo.py
index abe37c5..be9c5e1 100644
--- a/barchybrid/src/elmo.py
+++ b/barchybrid/src/elmo.py
@@ -8,7 +8,7 @@ class ELMo(object):
 
 
     def __init__(self, elmo_file, gamma=1.0, learn_gamma=False):
-        print "Reading ELMo embeddings from '%s'" % elmo_file
+        print("Reading ELMo embeddings from '%s'" % elmo_file)
         self.sentence_data = h5py.File(elmo_file, 'r')
 
         self.weights = []

From 49233291eea9eab07dc1e9c8637425f97f0e2817 Mon Sep 17 00:00:00 2001
From: attardi
Date: Sat, 13 Apr 2019 16:50:00 +0200
Subject: [PATCH 3/3] print

---
 barchybrid/src/mstlstm.py | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/barchybrid/src/mstlstm.py b/barchybrid/src/mstlstm.py
index 0435704..05b0470 100644
--- a/barchybrid/src/mstlstm.py
+++ b/barchybrid/src/mstlstm.py
@@ -70,8 +70,8 @@ def Predict(self, treebanks, datasplit, options):
             new_test_words = \
                 set(test_words) - self.feature_extractor.words.keys()
 
-            print "Number of OOV word types at test time: %i (out of %i)" % (
-                len(new_test_words), len(test_words))
+            print("Number of OOV word types at test time: %i (out of %i)" % (
+                len(new_test_words), len(test_words)))
 
             if len(new_test_words) > 0:
                 # no point loading embeddings if there are no words to look for
@@ -84,15 +84,15 @@ def Predict(self, treebanks, datasplit, options):
                 )
                 test_embeddings["words"].update(embeddings)
             if len(test_langs) > 1 and test_embeddings["words"]:
-                print "External embeddings found for %i words "\
+                print("External embeddings found for %i words "\
                     "(out of %i)" % \
-                    (len(test_embeddings["words"]), len(new_test_words))
+                    (len(test_embeddings["words"]), len(new_test_words)))
 
         if options.char_emb_size > 0:
             new_test_chars = \
                 set(test_chars) - self.feature_extractor.chars.keys()
-            print "Number of OOV char types at test time: %i (out of %i)" % (
-                len(new_test_chars), len(test_chars))
+            print("Number of OOV char types at test time: %i (out of %i)" % (
+                len(new_test_chars), len(test_chars)))
 
             if len(new_test_chars) > 0:
                 for lang in test_langs:
@@ -105,9 +105,9 @@ def Predict(self, treebanks, datasplit, options):
                 )
                 test_embeddings["chars"].update(embeddings)
             if len(test_langs) > 1 and test_embeddings["chars"]:
-                print "External embeddings found for %i chars "\
+                print("External embeddings found for %i chars "\
                     "(out of %i)" % \
-                    (len(test_embeddings["chars"]), len(new_test_chars))
+                    (len(test_embeddings["chars"]), len(new_test_chars)))
 
         data = utils.read_conll_dir(treebanks,datasplit,char_map=char_map)
         for iSentence, osentence in enumerate(data,1):
@@ -124,7 +124,7 @@ def Predict(self, treebanks, datasplit, options):
             ## ADD for handling multi-roots problem
             rootHead = [head for head in heads if head==0]
             if len(rootHead) != 1:
-                print "it has multi-root, changing it for heading first root for other roots"
+                print("it has multi-root, changing it for heading first root for other roots")
                 rootHead = [seq for seq, head in enumerate(heads) if head == 0]
                 for seq in rootHead[1:]:heads[seq] = rootHead[0]
             ## finish to multi-roots
@@ -174,7 +174,7 @@ def Train(self, trainData, options):
                 ' Errors: %.3f'%((float(eerrors)) / etotal)+\
                 ' Labeled Errors: %.3f'%(float(lerrors) / etotal)+\
                 ' Time: %.2gs'%(time.time()-start)
-            print loss_message
+            print(loss_message)
             start = time.time()
             eerrors = 0
             eloss = 0.0
@@ -244,5 +244,5 @@ def Train(self, trainData, options):
                 dy.renew_cg()
                 self.trainer.update()
 
-        print "Loss: ", mloss/iSentence
-        print "Total Training Time: %.2gs"%(time.time()-beg)
+        print("Loss: ", mloss/iSentence)
+        print("Total Training Time: %.2gs"%(time.time()-beg))
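
The three patches above are Python 3 compatibility fixes: the params pickle is now opened
in binary mode, and Python 2 print statements become print() calls. For context, here is a
minimal, self-contained sketch of the pickle round trip that PATCH 1/3 relies on; the file
name and the stored values are illustrative stand-ins, not the parser's actual model files:

    import pickle

    # Illustrative stand-ins for the (stored_vocab, stored_opt) pair that
    # barchybrid pickles into its params file.
    stored = ({"word": 0, "char": 1}, {"epochs": 30})

    params_path = "params.pickle"  # hypothetical path, for demonstration only

    # Write with a binary-mode handle ...
    with open(params_path, "wb") as paramsfp:
        pickle.dump(stored, paramsfp)

    # ... and read it back the same way. In Python 3, pickle operates on bytes,
    # so 'rb' is required here; a text-mode 'r' handle fails, which is what
    # PATCH 1/3 fixes in barchybrid/src/parser.py.
    with open(params_path, "rb") as paramsfp:
        stored_vocab, stored_opt = pickle.load(paramsfp)

    print(stored_vocab, stored_opt)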