tumblr_backup: fix the --count handling
dd40a88 (Do not trust the API's 'posts' and 'liked_count' fields,
2020-04-08) introduced a regression that caused the --count value to be
rounded up to the next MAX_POSTS multiple.

Fixes bbolli#216.
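
To make the regression concrete: before this fix, the count limit was only
re-checked between API batches, so a whole batch of MAX_POSTS posts was saved
before the limit took effect. A minimal sketch of that behaviour, with plain
Python standing in for the API calls; MAX_POSTS and the count/skip options are
the names used in tumblr_backup.py, everything else here is illustrative:

    MAX_POSTS = 50  # posts fetched per API request, as in tumblr_backup.py

    def saved_with_old_loop(available, count, skip=0):
        # Sketch of the pre-fix loop: the limit is only re-checked per batch.
        desired_count = count + skip if count else None
        post_count, i = 0, skip
        while not desired_count or post_count < desired_count:
            batch = min(MAX_POSTS, available - i)
            if batch <= 0:
                break
            post_count += batch   # the whole batch is saved before re-checking
            i += MAX_POSTS
        return post_count

    print(saved_with_old_loop(200, count=5))   # 50, not 5: rounded up to a MAX_POSTS multiple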
bbolli committed Sep 21, 2020
1 parent b889f28 commit 01d3528
Showing 1 changed file with 3 additions and 8 deletions.
tumblr_backup.py: 3 additions & 8 deletions
Expand Up @@ -548,18 +548,14 @@ def backup(self, account):
# use the meta information to create a HTML header
TumblrPost.post_header = self.header(body_class='post')

# find the limit of how many posts to back up
if options.count:
desired_count = options.count + options.skip
else:
desired_count = None

# returns whether any posts from this batch were saved
def _backup(posts):
for p in sorted(posts, key=lambda x: x['id'], reverse=True):
post = post_class(p)
if ident_max and long(post.ident) <= ident_max:
return False
if options.count and self.post_count >= options.count:
return False
if options.period:
if post.date >= options.p_stop:
continue
@@ -589,8 +585,7 @@ def _backup(posts):
         # Get the JSON entries from the API, which we can only do for MAX_POSTS posts at once.
         # Posts "arrive" in reverse chronological order. Post #0 is the most recent one.
         i = options.skip
-        # Download posts until we have `desired_count` (if specified), or until post range responses are empty
-        while not desired_count or self.post_count < desired_count:
+        while True:
             # find the upper bound
             log(account, "Getting posts %d to %d (of %d expected)\r" % (i, i + MAX_POSTS - 1, count_estimate))

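The fixed flow, sketched under the same assumptions as above (only MAX_POSTS,
options.count and self.post_count correspond to real names in the script): the
per-post check inside _backup() stops saving exactly at --count, and the outer
loop becomes `while True`, ending when a batch saves nothing or the API returns
no more posts.

    MAX_POSTS = 50  # as above

    def saved_with_new_loop(available, count, skip=0):
        # Sketch of the post-fix flow: the limit is checked per post.
        post_count, i = 0, skip
        while True:
            batch = min(MAX_POSTS, available - i)
            if batch <= 0:
                break                      # empty response: no more posts
            for _ in range(batch):
                if count and post_count >= count:
                    return post_count      # stop exactly at --count
                post_count += 1
            i += MAX_POSTS
        return post_count

    print(saved_with_new_loop(200, count=5))   # 5: exactly --count posts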