diff --git a/backoff/_async.py b/backoff/_async.py
index 38cde8e..8434870 100644
--- a/backoff/_async.py
+++ b/backoff/_async.py
@@ -21,26 +21,30 @@ def _ensure_coroutines(coros_or_funcs):
     return [_ensure_coroutine(f) for f in coros_or_funcs]
 
 
-async def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra):
-    details = {
-        'target': target,
-        'args': args,
-        'kwargs': kwargs,
-        'tries': tries,
-        'elapsed': elapsed,
-    }
+async def _call_handlers(hdlrs, details, **extra):
+    details = dict(details)
     details.update(extra)
     for hdlr in hdlrs:
         await hdlr(details)
 
 
-def retry_predicate(target, wait_gen, predicate,
-                    max_tries, max_time, jitter,
-                    on_success, on_backoff, on_giveup,
-                    wait_gen_kwargs):
-    on_success = _ensure_coroutines(on_success)
+def retry_predicate(
+    target,
+    wait_gen,
+    predicate,
+    max_tries,
+    max_time,
+    jitter,
+    on_try,
+    on_backoff,
+    on_giveup,
+    on_success,
+    wait_gen_kwargs,
+):
+    on_try = _ensure_coroutines(on_try)
     on_backoff = _ensure_coroutines(on_backoff)
     on_giveup = _ensure_coroutines(on_giveup)
+    on_success = _ensure_coroutines(on_success)
 
     # Easy to implement, please report if you need this.
     assert not asyncio.iscoroutinefunction(max_tries)
@@ -55,31 +59,43 @@ async def retry(*args, **kwargs):
         max_tries_ = _maybe_call(max_tries)
         max_time_ = _maybe_call(max_time)
 
-        tries = 0
+        details = {
+            "target": target,
+            "args": args,
+            "kwargs": kwargs,
+            "tries": 0
+        }
+
         start = datetime.datetime.now()
         wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
         while True:
-            tries += 1
-            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
-            details = (target, args, kwargs, tries, elapsed)
+            now = datetime.datetime.now()
+            details["elapsed"] = timedelta.total_seconds(now - start)
+            await _call_handlers(on_try, details)
 
             ret = await target(*args, **kwargs)
+            details["tries"] += 1
             if predicate(ret):
-                max_tries_exceeded = (tries == max_tries_)
+                max_tries_exceeded = (details["tries"] == max_tries_)
                 max_time_exceeded = (max_time_ is not None and
-                                     elapsed >= max_time_)
+                                     details["elapsed"] >= max_time_)
 
                 if max_tries_exceeded or max_time_exceeded:
-                    await _call_handlers(on_giveup, *details, value=ret)
+                    await _call_handlers(on_giveup, details, value=ret)
                     break
 
                 try:
-                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
+                    seconds = _next_wait(
+                        wait,
+                        jitter,
+                        details["elapsed"],
+                        max_time_
+                    )
                 except StopIteration:
-                    await _call_handlers(on_giveup, *details, value=ret)
+                    await _call_handlers(on_giveup, details, value=ret)
                     break
 
-                await _call_handlers(on_backoff, *details, value=ret,
+                await _call_handlers(on_backoff, details, value=ret,
                                      wait=seconds)
 
                 # Note: there is no convenient way to pass explicit event
@@ -94,7 +110,7 @@ async def retry(*args, **kwargs):
                 await asyncio.sleep(seconds)
                 continue
             else:
-                await _call_handlers(on_success, *details, value=ret)
+                await _call_handlers(on_success, details, value=ret)
                 break
 
         return ret
@@ -102,13 +118,24 @@ async def retry(*args, **kwargs):
 
     return retry
 
 
-def retry_exception(target, wait_gen, exception,
-                    max_tries, max_time, jitter, giveup,
-                    on_success, on_backoff, on_giveup,
-                    wait_gen_kwargs):
-    on_success = _ensure_coroutines(on_success)
+def retry_exception(
+    target,
+    wait_gen,
+    exception,
+    max_tries,
+    max_time,
+    jitter,
+    giveup,
+    on_try,
+    on_backoff,
+    on_giveup,
+    on_success,
+    wait_gen_kwargs,
+):
+    on_try = _ensure_coroutines(on_try)
     on_backoff = _ensure_coroutines(on_backoff)
     on_giveup = _ensure_coroutines(on_giveup)
+    on_success = _ensure_coroutines(on_success)
     giveup = _ensure_coroutine(giveup)
 
     # Easy to implement, please report if you need this.
@@ -121,33 +148,44 @@ async def retry(*args, **kwargs):
         max_tries_ = _maybe_call(max_tries)
         max_time_ = _maybe_call(max_time)
 
-        tries = 0
+        details = {
+            "target": target,
+            "args": args,
+            "kwargs": kwargs,
+            "tries": 0
+        }
         start = datetime.datetime.now()
         wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
         while True:
-            tries += 1
-            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
-            details = (target, args, kwargs, tries, elapsed)
+            now = datetime.datetime.now()
+            details["elapsed"] = timedelta.total_seconds(now - start)
+            await _call_handlers(on_try, details)
 
             try:
                 ret = await target(*args, **kwargs)
             except exception as e:
+                details["tries"] += 1
                 giveup_result = await giveup(e)
-                max_tries_exceeded = (tries == max_tries_)
+                max_tries_exceeded = (details["tries"] == max_tries_)
                 max_time_exceeded = (max_time_ is not None and
-                                     elapsed >= max_time_)
+                                     details["elapsed"] >= max_time_)
 
                 if giveup_result or max_tries_exceeded or max_time_exceeded:
-                    await _call_handlers(on_giveup, *details)
+                    await _call_handlers(on_giveup, details)
                     raise
 
                 try:
-                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
+                    seconds = _next_wait(
+                        wait,
+                        jitter,
+                        details["elapsed"],
+                        max_time_
+                    )
                 except StopIteration:
-                    await _call_handlers(on_giveup, *details)
+                    await _call_handlers(on_giveup, details)
                     raise e
 
-                await _call_handlers(on_backoff, *details, wait=seconds)
+                await _call_handlers(on_backoff, details, wait=seconds)
 
                 # Note: there is no convenient way to pass explicit event
                 # loop to decorator, so here we assume that either default
@@ -160,7 +198,8 @@ async def retry(*args, **kwargs):
                 #
                 await asyncio.sleep(seconds)
             else:
-                await _call_handlers(on_success, *details)
+                details["tries"] += 1
+                await _call_handlers(on_success, details)
                 return ret
 
     return retry
diff --git a/backoff/_decorator.py b/backoff/_decorator.py
index e541904..c953e71 100644
--- a/backoff/_decorator.py
+++ b/backoff/_decorator.py
@@ -17,16 +17,19 @@
 basestring = str
 
 
-def on_predicate(wait_gen,
-                 predicate=operator.not_,
-                 max_tries=None,
-                 max_time=None,
-                 jitter=full_jitter,
-                 on_success=None,
-                 on_backoff=None,
-                 on_giveup=None,
-                 logger='backoff',
-                 **wait_gen_kwargs):
+def on_predicate(
+    wait_gen,
+    predicate=operator.not_,
+    max_tries=None,
+    max_time=None,
+    jitter=full_jitter,
+    on_try=None,
+    on_backoff=None,
+    on_giveup=None,
+    on_success=None,
+    logger='backoff',
+    **wait_gen_kwargs
+):
     """Returns decorator for backoff and retry triggered by predicate.
 
     Args:
@@ -51,9 +54,9 @@ def on_predicate(wait_gen,
             concurrent clients. Wait times are jittered by default
             using the full_jitter function. Jittering may be disabled
             altogether by passing jitter=None.
-        on_success: Callable (or iterable of callables) with a unary
-            signature to be called in the event of success. The
-            parameter is a dict containing details about the invocation.
+        on_try: Callable (or iterable of callables) with a unary
+            signature to be called before each attempt. The parameter
+            is a dict containing details about the invocation.
         on_backoff: Callable (or iterable of callables) with a unary
             signature to be called in the event of a backoff. The
            parameter is a dict containing details about the invocation.
@@ -61,6 +64,9 @@ def on_predicate(wait_gen,
            signature to be called in the event that max_tries is
            exceeded. The parameter is a dict containing details
            about the invocation.
+        on_success: Callable (or iterable of callables) with a unary
+            signature to be called in the event of success. The
+            parameter is a dict containing details about the invocation.
         logger: Name of logger or Logger object to log to. Defaults
            to 'backoff'.
         **wait_gen_kwargs: Any additional keyword args specified will be
@@ -68,14 +74,16 @@ def on_predicate(wait_gen,
         args will first be evaluated and their return values passed.
         This is useful for runtime configuration.
     """
+
     def decorate(target):
         # change names because python 2.x doesn't have nonlocal
         logger_ = logger
         if isinstance(logger_, basestring):
             logger_ = logging.getLogger(logger_)
-        on_success_ = _config_handlers(on_success)
+        on_try_ = _config_handlers(on_try)
         on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_)
         on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_)
+        on_success_ = _config_handlers(on_success)
 
         retry = None
         if sys.version_info >= (3, 5):  # pragma: python=3.5
@@ -83,31 +91,44 @@ def decorate(target):
 
             if asyncio.iscoroutinefunction(target):
                 import backoff._async
+
                 retry = backoff._async.retry_predicate
 
         if retry is None:
             retry = _sync.retry_predicate
 
-        return retry(target, wait_gen, predicate,
-                     max_tries, max_time, jitter,
-                     on_success_, on_backoff_, on_giveup_,
-                     wait_gen_kwargs)
+        return retry(
+            target,
+            wait_gen,
+            predicate,
+            max_tries,
+            max_time,
+            jitter,
+            on_try_,
+            on_backoff_,
+            on_giveup_,
+            on_success_,
+            wait_gen_kwargs,
+        )
 
     # Return a function which decorates a target with a retry loop.
     return decorate
 
 
-def on_exception(wait_gen,
-                 exception,
-                 max_tries=None,
-                 max_time=None,
-                 jitter=full_jitter,
-                 giveup=lambda e: False,
-                 on_success=None,
-                 on_backoff=None,
-                 on_giveup=None,
-                 logger='backoff',
-                 **wait_gen_kwargs):
+def on_exception(
+    wait_gen,
+    exception,
+    max_tries=None,
+    max_time=None,
+    jitter=full_jitter,
+    giveup=lambda e: False,
+    on_try=None,
+    on_backoff=None,
+    on_giveup=None,
+    on_success=None,
+    logger='backoff',
+    **wait_gen_kwargs
+):
     """Returns decorator for backoff and retry triggered by exception.
 
     Args:
@@ -133,9 +154,9 @@ def on_exception(wait_gen,
         giveup: Function accepting an exception instance and returning
            whether or not to give up. Optional. The default is to always
            continue.
-        on_success: Callable (or iterable of callables) with a unary
-            signature to be called in the event of success. The
-            parameter is a dict containing details about the invocation.
+        on_try: Callable (or iterable of callables) with a unary
+            signature to be called before each attempt. The parameter
+            is a dict containing details about the invocation.
         on_backoff: Callable (or iterable of callables) with a unary
            signature to be called in the event of a backoff. The
            parameter is a dict containing details about the invocation.
@@ -143,36 +164,52 @@ def on_exception(wait_gen,
            signature to be called in the event that max_tries is
            exceeded. The parameter is a dict containing details
            about the invocation.
+        on_success: Callable (or iterable of callables) with a unary
+            signature to be called in the event of success. The
+            parameter is a dict containing details about the invocation.
         logger: Name or Logger object to log to. Defaults to 'backoff'.
         **wait_gen_kwargs: Any additional keyword args specified will be
            passed to wait_gen when it is initialized. Any callable
            args will first be evaluated and their return values passed.
            This is useful for runtime configuration.
""" + def decorate(target): # change names because python 2.x doesn't have nonlocal logger_ = logger if isinstance(logger_, basestring): logger_ = logging.getLogger(logger_) - on_success_ = _config_handlers(on_success) + on_try_ = _config_handlers(on_try) on_backoff_ = _config_handlers(on_backoff, _log_backoff, logger_) on_giveup_ = _config_handlers(on_giveup, _log_giveup, logger_) + on_success_ = _config_handlers(on_success) retry = None - if sys.version_info[:2] >= (3, 5): # pragma: python=3.5 + if sys.version_info[:2] >= (3, 5): # pragma: python=3.5 import asyncio if asyncio.iscoroutinefunction(target): import backoff._async + retry = backoff._async.retry_exception if retry is None: retry = _sync.retry_exception - return retry(target, wait_gen, exception, - max_tries, max_time, jitter, giveup, - on_success_, on_backoff_, on_giveup_, - wait_gen_kwargs) + return retry( + target, + wait_gen, + exception, + max_tries, + max_time, + jitter, + giveup, + on_try_, + on_backoff_, + on_giveup_, + on_success_, + wait_gen_kwargs, + ) # Return a function which decorates a target with a retry loop. return decorate diff --git a/backoff/_sync.py b/backoff/_sync.py index 477765d..eea4c3a 100644 --- a/backoff/_sync.py +++ b/backoff/_sync.py @@ -7,24 +7,26 @@ from backoff._common import (_init_wait_gen, _maybe_call, _next_wait) -def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra): - details = { - 'target': target, - 'args': args, - 'kwargs': kwargs, - 'tries': tries, - 'elapsed': elapsed, - } +def _call_handlers(hdlrs, details, **extra): + details = dict(details) details.update(extra) for hdlr in hdlrs: hdlr(details) -def retry_predicate(target, wait_gen, predicate, - max_tries, max_time, jitter, - on_success, on_backoff, on_giveup, - wait_gen_kwargs): - +def retry_predicate( + target, + wait_gen, + predicate, + max_tries, + max_time, + jitter, + on_try, + on_backoff, + on_giveup, + on_success, + wait_gen_kwargs, +): @functools.wraps(target) def retry(*args, **kwargs): @@ -32,37 +34,48 @@ def retry(*args, **kwargs): max_tries_ = _maybe_call(max_tries) max_time_ = _maybe_call(max_time) - tries = 0 + details = { + "target": target, + "args": args, + "kwargs": kwargs, + "tries": 0 + } start = datetime.datetime.now() wait = _init_wait_gen(wait_gen, wait_gen_kwargs) while True: - tries += 1 - elapsed = timedelta.total_seconds(datetime.datetime.now() - start) - details = (target, args, kwargs, tries, elapsed) + now = datetime.datetime.now() + details["elapsed"] = timedelta.total_seconds(now - start) + _call_handlers(on_try, details) ret = target(*args, **kwargs) + details["tries"] += 1 if predicate(ret): - max_tries_exceeded = (tries == max_tries_) + max_tries_exceeded = (details["tries"] == max_tries_) max_time_exceeded = (max_time_ is not None and - elapsed >= max_time_) + details["elapsed"] >= max_time_) if max_tries_exceeded or max_time_exceeded: - _call_handlers(on_giveup, *details, value=ret) + _call_handlers(on_giveup, details, value=ret) break try: - seconds = _next_wait(wait, jitter, elapsed, max_time_) + seconds = _next_wait( + wait, + jitter, + details["elapsed"], + max_time_ + ) except StopIteration: - _call_handlers(on_giveup, *details) + _call_handlers(on_giveup, details, value=ret) break - _call_handlers(on_backoff, *details, + _call_handlers(on_backoff, details, value=ret, wait=seconds) time.sleep(seconds) continue else: - _call_handlers(on_success, *details, value=ret) + _call_handlers(on_success, details, value=ret) break return ret @@ -70,11 +83,20 @@ def retry(*args, 
     return retry
 
 
-def retry_exception(target, wait_gen, exception,
-                    max_tries, max_time, jitter, giveup,
-                    on_success, on_backoff, on_giveup,
-                    wait_gen_kwargs):
-
+def retry_exception(
+    target,
+    wait_gen,
+    exception,
+    max_tries,
+    max_time,
+    jitter,
+    giveup,
+    on_try,
+    on_backoff,
+    on_giveup,
+    on_success,
+    wait_gen_kwargs,
+):
     @functools.wraps(target)
     def retry(*args, **kwargs):
 
@@ -82,36 +104,48 @@ def retry(*args, **kwargs):
         max_tries_ = _maybe_call(max_tries)
         max_time_ = _maybe_call(max_time)
 
-        tries = 0
+        details = {
+            "target": target,
+            "args": args,
+            "kwargs": kwargs,
+            "tries": 0
+        }
         start = datetime.datetime.now()
         wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
         while True:
-            tries += 1
-            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
-            details = (target, args, kwargs, tries, elapsed)
+            now = datetime.datetime.now()
+            details["elapsed"] = timedelta.total_seconds(now - start)
+            _call_handlers(on_try, details)
 
             try:
                 ret = target(*args, **kwargs)
             except exception as e:
-                max_tries_exceeded = (tries == max_tries_)
+                details["tries"] += 1
+                max_tries_exceeded = (details["tries"] == max_tries_)
                 max_time_exceeded = (max_time_ is not None and
-                                     elapsed >= max_time_)
+                                     details["elapsed"] >= max_time_)
 
                 if giveup(e) or max_tries_exceeded or max_time_exceeded:
-                    _call_handlers(on_giveup, *details)
+                    _call_handlers(on_giveup, details)
                     raise
 
                 try:
-                    seconds = _next_wait(wait, jitter, elapsed, max_time_)
+                    seconds = _next_wait(
+                        wait,
+                        jitter,
+                        details["elapsed"],
+                        max_time_
+                    )
                 except StopIteration:
-                    _call_handlers(on_giveup, *details)
+                    _call_handlers(on_giveup, details)
                     raise e
 
-                _call_handlers(on_backoff, *details, wait=seconds)
+                _call_handlers(on_backoff, details, wait=seconds)
 
                 time.sleep(seconds)
             else:
-                _call_handlers(on_success, *details)
+                details["tries"] += 1
+                _call_handlers(on_success, details)
                 return ret
 
     return retry
diff --git a/tests/common.py b/tests/common.py
index 56c2a8d..196e29d 100644
--- a/tests/common.py
+++ b/tests/common.py
@@ -3,18 +3,25 @@
 import functools
 
 
-# create event handler which log their invocations to a dict
-def _log_hdlrs():
+def _logging_handlers():
+    """
+    Set up some handlers which log events for testing.
+
+    Returns:
+        log - a log mapping events to details
+        kwargs - handler kwargs suitable for passing to the decorators
+    """
     log = collections.defaultdict(list)
 
-    def log_hdlr(event, details):
+    def handler(event, details):
         log[event].append(details)
 
-    log_success = functools.partial(log_hdlr, 'success')
-    log_backoff = functools.partial(log_hdlr, 'backoff')
-    log_giveup = functools.partial(log_hdlr, 'giveup')
+    handlers = {
+        "on_" + event: functools.partial(handler, event)
+        for event in ["try", "backoff", "giveup", "success"]
+    }
 
-    return log, log_success, log_backoff, log_giveup
+    return log, handlers
 
 
 # decorator that that saves the target as
diff --git a/tests/python35/test_backoff_async.py b/tests/python35/test_backoff_async.py
index ca62c4a..083f5f4 100644
--- a/tests/python35/test_backoff_async.py
+++ b/tests/python35/test_backoff_async.py
@@ -1,11 +1,12 @@
 # coding:utf-8
 
 import asyncio  # Python 3.5 code and syntax is allowed in this file
-import backoff
 import pytest
 import random
 
-from tests.common import _log_hdlrs, _save_target
+import backoff
+
+from tests.common import _logging_handlers, _save_target
 
 
 async def _await_none(x):
@@ -133,15 +134,13 @@ async def endless_exceptions():
 async def test_on_exception_success_random_jitter(monkeypatch):
     monkeypatch.setattr('asyncio.sleep', _await_none)
 
-    log, log_success, log_backoff, log_giveup = _log_hdlrs()
+    log, handlers = _logging_handlers()
 
     @backoff.on_exception(backoff.expo,
                           Exception,
-                          on_success=log_success,
-                          on_backoff=log_backoff,
-                          on_giveup=log_giveup,
                           jitter=backoff.random_jitter,
-                          factor=0.5)
+                          factor=0.5,
+                          **handlers)
     @_save_target
     async def succeeder(*args, **kwargs):
         # succeed after we've backed off twice
@@ -151,9 +150,10 @@ async def succeeder(*args, **kwargs):
     await succeeder(1, 2, 3, foo=1, bar=2)
 
     # we try 3 times, backing off twice before succeeding
-    assert len(log['success']) == 1
+    assert len(log["try"]) == 3
     assert len(log['backoff']) == 2
     assert len(log['giveup']) == 0
+    assert len(log['success']) == 1
 
     for i in range(2):
         details = log['backoff'][i]
@@ -164,15 +164,13 @@ async def succeeder(*args, **kwargs):
 async def test_on_exception_success_full_jitter(monkeypatch):
     monkeypatch.setattr('asyncio.sleep', _await_none)
 
-    log, log_success, log_backoff, log_giveup = _log_hdlrs()
+    log, handlers = _logging_handlers()
 
     @backoff.on_exception(backoff.expo,
                           Exception,
-                          on_success=log_success,
-                          on_backoff=log_backoff,
-                          on_giveup=log_giveup,
                           jitter=backoff.full_jitter,
-                          factor=0.5)
+                          factor=0.5,
+                          **handlers)
     @_save_target
     async def succeeder(*args, **kwargs):
         # succeed after we've backed off twice
@@ -182,9 +180,10 @@ async def succeeder(*args, **kwargs):
     await succeeder(1, 2, 3, foo=1, bar=2)
 
     # we try 3 times, backing off twice before succeeding
-    assert len(log['success']) == 1
+    assert len(log["try"]) == 3
     assert len(log['backoff']) == 2
     assert len(log['giveup']) == 0
+    assert len(log['success']) == 1
 
     for i in range(2):
         details = log['backoff'][i]
@@ -193,15 +192,13 @@
 
 @pytest.mark.asyncio
 async def test_on_exception_success():
-    log, log_success, log_backoff, log_giveup = _log_hdlrs()
+    log, handlers = _logging_handlers()
 
     @backoff.on_exception(backoff.constant,
                           Exception,
-                          on_success=log_success,
-                          on_backoff=log_backoff,
-                          on_giveup=log_giveup,
                           jitter=None,
-                          interval=0)
+                          interval=0,
+                          **handlers)
     @_save_target
     async def succeeder(*args, **kwargs):
         # succeed after we've backed off twice
@@ -236,16 +233,14 @@
 
 @pytest.mark.asyncio
 async def test_on_exception_giveup():
-    log, log_success, log_backoff, log_giveup = _log_hdlrs()
+    log, handlers = _logging_handlers()
 
     @backoff.on_exception(backoff.constant,
                           ValueError,
-                          on_success=log_success,
-                          on_backoff=log_backoff,
-                          on_giveup=log_giveup,
                           max_tries=3,
                           jitter=None,
-                          interval=0)
+                          interval=0,
+                          **handlers)
     @_save_target
     async def exceptor(*args, **kwargs):
         raise ValueError("catch me")
@@ -254,6 +249,7 @@ async def exceptor(*args, **kwargs):
         await exceptor(1, 2, 3, foo=1, bar=2)
 
     # we try 3 times, backing off twice and giving up once
+    assert len(log['try']) == 3
     assert len(log['success']) == 0
     assert len(log['backoff']) == 2
     assert len(log['giveup']) == 1
@@ -311,14 +307,12 @@ async def foo_bar_baz():
 
 @pytest.mark.asyncio
 async def test_on_predicate_success():
-    log, log_success, log_backoff, log_giveup = _log_hdlrs()
+    log, handlers = _logging_handlers()
 
     @backoff.on_predicate(backoff.constant,
-                          on_success=log_success,
-                          on_backoff=log_backoff,
-                          on_giveup=log_giveup,
                           jitter=None,
-                          interval=0)
+                          interval=0,
+                          **handlers)
     @_save_target
     async def success(*args, **kwargs):
         # succeed after we've backed off twice
@@ -327,6 +321,7 @@ async def success(*args, **kwargs):
     await success(1, 2, 3, foo=1, bar=2)
 
     # we try 3 times, backing off twice before succeeding
+    assert len(log['try']) == 3
     assert len(log['success']) == 1
     assert len(log['backoff']) == 2
     assert len(log['giveup']) == 0
@@ -354,15 +349,13 @@ async def success(*args, **kwargs):
 
 @pytest.mark.asyncio
 async def test_on_predicate_giveup():
-    log, log_success, log_backoff, log_giveup = _log_hdlrs()
+    log, handlers = _logging_handlers()
 
     @backoff.on_predicate(backoff.constant,
-                          on_success=log_success,
-                          on_backoff=log_backoff,
-                          on_giveup=log_giveup,
                           max_tries=3,
                           jitter=None,
-                          interval=0)
+                          interval=0,
+                          **handlers)
     @_save_target
     async def emptiness(*args, **kwargs):
         pass
@@ -386,34 +379,70 @@ async def emptiness(*args, **kwargs):
 
 @pytest.mark.asyncio
 async def test_on_predicate_iterable_handlers():
-    hdlrs = [_log_hdlrs() for _ in range(3)]
+    attempts1 = []
+    attempts2 = []
+    backoffs1 = []
+    backoffs2 = []
+    giveups1 = []
+    giveups2 = []
+    successes1 = []
+    successes2 = []
+
+    def on_try1(details):
+        attempts1.append(details)
+
+    def on_try2(details):
+        attempts2.append(details)
+
+    def on_backoff1(details):
+        backoffs1.append(details)
+
+    def on_backoff2(details):
+        backoffs2.append(details)
+
+    def on_giveup1(details):
+        giveups1.append(details)
+
+    def on_giveup2(details):
+        giveups2.append(details)
+
+    def on_success1(details):
+        successes1.append(details)
+
+    def on_success2(details):
+        successes2.append(details)
 
     @backoff.on_predicate(backoff.constant,
-                          on_success=(h[1] for h in hdlrs),
-                          on_backoff=(h[2] for h in hdlrs),
-                          on_giveup=(h[3] for h in hdlrs),
                           max_tries=3,
                           jitter=None,
-                          interval=0)
+                          interval=0,
+                          on_try=[on_try1, on_try2],
+                          on_backoff=[on_backoff1, on_backoff2],
+                          on_giveup=[on_giveup1, on_giveup2],
+                          on_success=[on_success1, on_success2])
     @_save_target
     async def emptiness(*args, **kwargs):
         pass
 
     await emptiness(1, 2, 3, foo=1, bar=2)
 
-    for i in range(3):
-        assert len(hdlrs[i][0]['success']) == 0
-        assert len(hdlrs[i][0]['backoff']) == 2
-        assert len(hdlrs[i][0]['giveup']) == 1
+    assert len(attempts1) == 3
+    assert len(attempts2) == 3
+    assert len(backoffs1) == 2
+    assert len(backoffs2) == 2
+    assert len(giveups1) == 1
+    assert len(giveups2) == 1
+    assert len(successes1) == 0
+    assert len(successes2) == 0
 
-        details = dict(hdlrs[i][0]['giveup'][0])
-        elapsed = details.pop('elapsed')
-        assert isinstance(elapsed, float)
-        assert details == {'args': (1, 2, 3),
-                           'kwargs': {'foo': 1, 'bar': 2},
-                           'target': emptiness._target,
-                           'tries': 3,
-                           'value': None}
+    details = dict(giveups1[0])
+    elapsed = details.pop('elapsed')
+    assert isinstance(elapsed, float)
+    assert details == {'args': (1, 2, 3),
+                       'kwargs': {'foo': 1, 'bar': 2},
+                       'target': emptiness._target,
+                       'tries': 3,
+                       'value': None}
 
 
 @pytest.mark.asyncio
@@ -453,15 +482,13 @@ async def test_on_exception_success_0_arg_jitter(monkeypatch):
     monkeypatch.setattr('asyncio.sleep', _await_none)
     monkeypatch.setattr('random.random', lambda: 0)
 
-    log, log_success, log_backoff, log_giveup = _log_hdlrs()
+    log, handlers = _logging_handlers()
 
     @backoff.on_exception(backoff.constant,
                           Exception,
-                          on_success=log_success,
-                          on_backoff=log_backoff,
-                          on_giveup=log_giveup,
                           jitter=random.random,
-                          interval=0)
+                          interval=0,
+                          **handlers)
     @_save_target
     async def succeeder(*args, **kwargs):
         # succeed after we've backed off twice
@@ -472,9 +499,10 @@ async def succeeder(*args, **kwargs):
     await succeeder(1, 2, 3, foo=1, bar=2)
 
     # we try 3 times, backing off twice before succeeding
-    assert len(log['success']) == 1
+    assert len(log["try"]) == 3
     assert len(log['backoff']) == 2
     assert len(log['giveup']) == 0
+    assert len(log['success']) == 1
 
     for i in range(2):
         details = log['backoff'][i]
@@ -502,14 +530,13 @@ async def test_on_predicate_success_0_arg_jitter(monkeypatch):
     monkeypatch.setattr('asyncio.sleep', _await_none)
     monkeypatch.setattr('random.random', lambda: 0)
 
-    log, log_success, log_backoff, log_giveup = _log_hdlrs()
+    log, handlers = _logging_handlers()
 
     @backoff.on_predicate(backoff.constant,
-                          on_success=log_success,
-                          on_backoff=log_backoff,
-                          on_giveup=log_giveup,
                           jitter=random.random,
-                          interval=0)
+                          interval=0,
+                          **handlers
+                          )
     @_save_target
     async def success(*args, **kwargs):
         # succeed after we've backed off twice
@@ -519,9 +546,10 @@ async def success(*args, **kwargs):
     await success(1, 2, 3, foo=1, bar=2)
 
     # we try 3 times, backing off twice before succeeding
-    assert len(log['success']) == 1
+    assert len(log["try"]) == 3
     assert len(log['backoff']) == 2
     assert len(log['giveup']) == 0
+    assert len(log['success']) == 1
 
     for i in range(2):
         details = log['backoff'][i]
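Below is a minimal usage sketch (not part of the patch) showing the new on_try hook this diff adds to the decorators; the log_attempt handler and flaky() target are hypothetical and assume a backoff build that includes these changes:

    import backoff


    def log_attempt(details):
        # on_try handlers run before every call of the decorated target;
        # "tries" counts completed attempts, so it is 0 when the first
        # attempt starts, and "elapsed" is seconds since the call began.
        print("attempt %d, %.3fs elapsed" % (details["tries"] + 1, details["elapsed"]))


    @backoff.on_exception(backoff.expo, ValueError, max_tries=3, on_try=log_attempt)
    def flaky():
        # hypothetical target that fails on its first two calls, then succeeds
        flaky.calls = getattr(flaky, "calls", 0) + 1
        if flaky.calls < 3:
            raise ValueError("not yet")
        return "ok"


    print(flaky())  # three on_try calls, two backoffs, then "ok"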