Make compatible with Python 2 and 3.
Commit 5908b4c (parent: 4d43b65)
Showing 19 changed files with 283 additions and 124 deletions.
In the module defining class Bleu, the BleuScorer import becomes an explicit package-relative import, so it resolves under Python 3 as well as Python 2:

@@ -8,7 +8,7 @@
 # Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
 # Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
 
-from bleu_scorer import BleuScorer
+from .bleu_scorer import BleuScorer
 
 
 class Bleu:
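For context (not part of the commit): Python 2 resolved the bare form from bleu_scorer import BleuScorer as an implicit relative import within the package, a behaviour Python 3 removed, which is why the explicit dot is the spelling that works on both. A hedged sketch of a common compatibility pattern, assuming the two modules sit in the same package (the surrounding file and package layout are assumptions, not shown in this view):

# Hedged sketch, not taken from the repository.
from __future__ import absolute_import    # give Python 2 the Python 3 import rules

try:
    # explicit relative import: works on both interpreters when imported as part of a package
    from .bleu_scorer import BleuScorer
except (ImportError, ValueError):
    # no parent package (e.g. the file is run as a loose script):
    # Python 3 raises ImportError here, Python 2 raises ValueError
    from bleu_scorer import BleuScorer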
In the module defining class Cider, the CiderScorer import gets the same treatment, and the file gains a trailing newline:

@@ -7,7 +7,7 @@
 #
 # Authors: Ramakrishna Vedantam <[email protected]> and Tsung-Yi Lin <[email protected]>
 
-from cider_scorer import CiderScorer
+from .cider_scorer import CiderScorer
 import pdb
 
 class Cider:

@@ -51,4 +51,4 @@ def compute_score(self, gts, res):
         return score, scores
 
     def method(self):
-        return "CIDEr"
\ No newline at end of file
+        return "CIDEr"
In the scorer module itself (the code defining precook and the tf-idf machinery below), import math moves up next to import copy, and the six helpers come in; xrange is aliased to range so the loops below keep one spelling that works on both interpreters:

@@ -3,10 +3,12 @@
 # Ramakrishna Vedantam <[email protected]>
 
 import copy
+import math
 from collections import defaultdict
 import numpy as np
 import pdb
-import math
+from six.moves import xrange as range
+import six
 
 def precook(s, n=4, out=False):
     """
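For context: six.moves.xrange is the lazy integer sequence on both interpreters (xrange on Python 2, the built-in range on Python 3), so the alias lets the loops below keep a single name. A minimal sketch, assuming six is installed:

# Hedged sketch, not part of the commit.
from six.moves import xrange as range   # xrange on Python 2, builtin range on Python 3

print(list(range(1, 5)))   # [1, 2, 3, 4] on either interpreter
for k in range(1, 4 + 1):  # same loop shape as precook() below, lazy on both versions
    pass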
@@ -19,8 +21,8 @@ def precook(s, n=4, out=False):
     """
     words = s.split()
     counts = defaultdict(int)
-    for k in xrange(1,n+1):
-        for i in xrange(len(words)-k+1):
+    for k in range(1,n+1):
+        for i in range(len(words)-k+1):
             ngram = tuple(words[i:i+k])
             counts[ngram] += 1
     return counts
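The hunk above is the n-gram counting loop at the heart of the scorer: for every order k up to n it counts each k-word window of the caption. A standalone illustration of what that loop computes (n limited to 2 here, helper name hypothetical, not taken from the file):

# Standalone illustration of the counting loop shown above.
from collections import defaultdict

def count_ngrams(s, n=2):                     # hypothetical helper name
    words = s.split()
    counts = defaultdict(int)
    for k in range(1, n + 1):                 # n-gram orders 1..n
        for i in range(len(words) - k + 1):   # every start position of a k-gram
            counts[tuple(words[i:i + k])] += 1
    return counts

print(dict(count_ngrams("the cat sat on the mat")))
# e.g. ('the',) -> 2, ('cat',) -> 1, ..., ('the', 'cat') -> 1, ('on', 'the') -> 1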
@@ -99,7 +101,7 @@ def compute_doc_freq(self):
         '''
         for refs in self.crefs:
             # refs, k ref captions of one image
-            for ngram in set([ngram for ref in refs for (ngram,count) in ref.iteritems()]):
+            for ngram in set([ngram for ref in refs for (ngram,count) in six.iteritems(ref)]):
                 self.document_frequency[ngram] += 1
             # maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
 
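Each ref here is one of the per-caption n-gram count dicts built by the loop above, and the only change is how it is iterated: dict.iteritems() does not exist on Python 3, and six.iteritems() dispatches to iteritems() on Python 2 and items() on Python 3. A minimal sketch with made-up counts:

# Hedged sketch of the iteration change only; the counts are made up.
from __future__ import print_function
import six

ref = {("a",): 2, ("a", "cat"): 1}

# Python 2 only:           for ngram, count in ref.iteritems(): ...
# Python 2 and 3 via six:
for ngram, count in six.iteritems(ref):
    print(ngram, count)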
@@ -115,7 +117,7 @@ def counts2vec(cnts):
             vec = [defaultdict(float) for _ in range(self.n)]
             length = 0
             norm = [0.0 for _ in range(self.n)]
-            for (ngram,term_freq) in cnts.iteritems():
+            for (ngram,term_freq) in six.iteritems(cnts):
                 # give word count 1 if it doesn't appear in reference corpus
                 df = np.log(max(1.0, self.document_frequency[ngram]))
                 # ngram index
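The visible df line is the idf side of the weighting in counts2vec: the document frequency is clamped at 1.0 before the log so that an n-gram never seen in the reference corpus does not produce log(0). A small numeric sketch of just that clamping (corpus values are made up; the rest of counts2vec is omitted):

# Hedged numeric sketch of the df clamping shown above; values are made up.
from __future__ import print_function
import numpy as np
from collections import defaultdict

document_frequency = defaultdict(float, {("a", "dog"): 20.0})

for ngram in [("a", "dog"), ("purple", "dog")]:       # seen vs. unseen n-gram
    df = np.log(max(1.0, document_frequency[ngram]))  # unseen -> log(1.0) == 0.0
    print(ngram, df)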
@@ -146,7 +148,7 @@ def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
             val = np.array([0.0 for _ in range(self.n)])
             for n in range(self.n):
                 # ngram
-                for (ngram,count) in vec_hyp[n].iteritems():
+                for (ngram,count) in six.iteritems(vec_hyp[n]):
                     # vrama91 : added clipping
                     val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]
 
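The changed line sits inside the clipped term of the similarity: for each n-gram order the hypothesis weight is clipped to the reference weight before being multiplied in (the "vrama91 : added clipping" comment). A tiny numeric sketch of just that accumulation, with made-up vectors and without the normalisation and length penalty of the real sim():

# Hedged sketch of the clipped accumulation shown above; the vectors are made up.
import numpy as np
from collections import defaultdict

n_orders = 2
vec_hyp = [defaultdict(float, {("dog",): 3.0}), defaultdict(float)]   # hypothesis weights
vec_ref = [defaultdict(float, {("dog",): 1.0}), defaultdict(float)]   # reference weights

val = np.array([0.0 for _ in range(n_orders)])
for n in range(n_orders):
    for ngram, count in vec_hyp[n].items():
        # clip the hypothesis weight at the reference weight, then multiply
        val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]

print(val)   # [1. 0.] -- the hypothesis weight of 3.0 is clipped to 1.0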