[tfr] fix the weighted mrr computation for doc-level weights.
PiperOrigin-RevId: 270370744
xuanhuiwang authored and ramakumar1729 committed Sep 24, 2019
1 parent 1d91c5f commit 9a52663
Showing 2 changed files with 9 additions and 4 deletions.
7 changes: 5 additions & 2 deletions tensorflow_ranking/python/metrics.py
@@ -260,10 +260,13 @@ def compute(self, labels, predictions, weights):
     # Relevance = 1.0 when labels >= 1.0 to accommodate graded relevance.
     relevance = tf.cast(tf.greater_equal(sorted_labels, 1.0), dtype=tf.float32)
     reciprocal_rank = 1.0 / tf.cast(tf.range(1, topn + 1), dtype=tf.float32)
-    # MRR has a shape of [batch_size, 1]
+    # MRR has a shape of [batch_size, 1].
     mrr = tf.reduce_max(
         input_tensor=relevance * reciprocal_rank, axis=1, keepdims=True)
-    return tf.compat.v1.metrics.mean(mrr * tf.ones_like(weights), weights)
+    per_list_weights = _per_example_weights_to_per_list_weights(
+        weights=weights,
+        relevance=tf.cast(tf.greater_equal(labels, 1.0), dtype=tf.float32))
+    return tf.compat.v1.metrics.mean(mrr, per_list_weights)


 def mean_reciprocal_rank(labels, predictions, weights=None, name=None):
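Why the change matters: the removed return statement fed the raw per-example weights into tf.compat.v1.metrics.mean against the per-list mrr tensor (broadcast via tf.ones_like(weights)), so every document in a list contributed its weight whether or not it was relevant. The new code first collapses the document-level weights into a single weight per list and only then takes the weighted mean. Below is a minimal NumPy sketch of that collapsing step; it assumes _per_example_weights_to_per_list_weights averages the weights of the relevant documents in each list (the helper's definition is not part of this diff, so treat the exact formula as an assumption).

import numpy as np

def per_list_weights_sketch(weights, relevance):
  # Assumed behavior of _per_example_weights_to_per_list_weights:
  # average the weights of the relevant documents in each list.
  # Illustration only; the real helper lives in metrics.py.
  weights = np.asarray(weights, dtype=np.float32)
  relevance = np.asarray(relevance, dtype=np.float32)
  numerator = (weights * relevance).sum(axis=1, keepdims=True)
  denominator = relevance.sum(axis=1, keepdims=True)
  return numerator / denominator  # shape [batch_size, 1], one weight per list

# Illustrative doc-level weights and binary relevance (label >= 1.0).
weights = [[1., 2., 3.], [4., 5., 6.]]
relevance = [[0., 0., 1.], [0., 1., 1.]]
print(per_list_weights_sketch(weights, relevance))  # => [[3.], [5.5]]

tf.compat.v1.metrics.mean(mrr, per_list_weights) then computes sum(mrr * per_list_weight) / sum(per_list_weight) over the batch, which is where the 3. and (6. + 5.) / 2. factors in the updated tests below come from.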
6 changes: 4 additions & 2 deletions tensorflow_ranking/python/metrics_test.py
@@ -108,7 +108,8 @@ def test_mean_reciprocal_rank(self):
       self._check_metrics([
           (m([labels[0]], [scores[0]]), 0.5),
           (m(labels, scores), (0.5 + 1.0) / 2),
-          (m(labels, scores, weights), (6. * 0.5 + 15. * 1.) / (6. + 15.)),
+          (m(labels, scores,
+             weights), (3. * 0.5 + (6. + 5.) / 2. * 1.) / (3. + (6. + 5) / 2.)),
       ])

   def test_make_mean_reciprocal_rank_fn(self):
@@ -125,7 +126,8 @@ def test_make_mean_reciprocal_rank_fn(self):
       self._check_metrics([
           (m([labels[0]], [scores[0]], features), 0.5),
           (m(labels, scores, features), (0.5 + 1.0) / 2),
-          (m_w(labels, scores, features), (6. * 0.5 + 15. * 1.) / (6. + 15.)),
+          (m_w(labels, scores, features),
+           (3. * 0.5 + (6. + 5.) / 2. * 1.) / (3. + (6. + 5.) / 2.)),
       ])

   def test_average_relevance_position(self):
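For a quick sanity check on the updated expectations: under the assumption (the fixture is defined earlier in metrics_test.py and is not shown in this diff) that the relevant document in the first list has weight 3., the relevant documents in the second list have weights 6. and 5., and the per-list MRRs are 0.5 and 1.0, the new value works out as follows.

# Hypothetical reconstruction of the test arithmetic; labels, scores and
# weights come from the surrounding fixture, which is outside this diff.
mrr = [0.5, 1.0]                # per-list MRR values
list_w = [3., (6. + 5.) / 2.]   # per-list weights: mean weight of relevant docs
new_expected = sum(m * w for m, w in zip(mrr, list_w)) / sum(list_w)
old_expected = (6. * 0.5 + 15. * 1.) / (6. + 15.)  # expectation removed by this commit
print(round(new_expected, 4), round(old_expected, 4))  # 0.8235 0.8571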
