# coding: utf-8
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.

from . import retry_checkers

import random
import time


class RetryStrategyBuilder(object):
    """
    A class which can build a retry strategy based on provided criteria. Criteria can be provided at construction
    time or afterwards using the add_* (to add/enable criteria) and no_* (to disable/remove criteria) methods.

    When calculating the delay between retries, we use exponential backoff with full jitter as the default strategy
    vended by this builder.
    """
    BACKOFF_FULL_JITTER = 'full_jitter'

    def __init__(self, **kwargs):
| 21 | + """ |
| 22 | + Creates a new builder and initializes it based on any provided parameters. |
| 23 | +
|
| 24 | + :param Boolean max_attempts_check (optional): |
| 25 | + Whether to enable a check that we don't exceed a certain number of attempts. If not provided |
| 26 | + this defaults to False (i.e. this check will not be done) |
| 27 | +
|
| 28 | + :param Boolean service_error_check (optional): |
| 29 | + Whether to enable a check that will retry on connection errors, timeouts and service errors |
| 30 | + which match given combinations of HTTP statuses and textual error codes. If not provided |
| 31 | + this defaults to False (i.e. this check will not be done) |
| 32 | +
|
| 33 | + :param int max_atttemps (optional): |
| 34 | + If we are checking that we don't exceed a certain number of attempts, what that number of |
| 35 | + attempts should be. This only applies if we are performing a check on the maximum number of |
| 36 | + attempts and will be ignored otherwise. If we are performing a check on the maximum number of |
| 37 | + attempts and this value is not provided, we will default to a maximum of 5 attempts |
| 38 | +
|
| 39 | + :param dict service_error_retry_config (optional): |
| 40 | + If we are checking on service errors, we can configure what HTTP statuses (e.g. 429) to retry on and, optionally, |
| 41 | + whether the textual code (e.g. TooManyRequests) matches a given value. |
| 42 | +
|
| 43 | + This is a dictionary where the key is an integer representing the HTTP status, and the value is a list(str) where we |
| 44 | + will test if the textual code in the service error is a member of the list. If an empty list is provided, then only |
| 45 | + the numeric status is checked for retry purposes. |
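            For example, a value of {429: [], 500: ['InternalServerError']} (an illustrative configuration, not a
            default of this module) would retry on any HTTP 429, and on an HTTP 500 only when the service error's
            textual code is InternalServerError.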

            If we are performing a check on service errors and this value is not provided, then by default we will
            retry on HTTP 429s (throttles) without any textual code check.

        :param Boolean service_error_retry_on_any_5xx (optional):
            If we are checking on service errors, whether to retry on any HTTP 5xx received from the service. If
            we are performing a check on service errors and this value is not provided, it defaults to True (retry on any 5xx)

        :param int retry_base_sleep_time_millis (optional):
            For exponential backoff with jitter, the base time to use in our retry calculation in milliseconds. If not
            provided, this value defaults to 1000ms (i.e. 1 second)

        :param int retry_exponential_growth_factor (optional):
            For exponential backoff with jitter, the factor which we will raise to the power of the number of attempts.
            If not provided, this value defaults to 2

        :param int retry_max_wait_time_millis (optional):
            For exponential backoff with jitter, the maximum amount of time to wait between retries. If not provided,
            this value defaults to 8000ms (i.e. 8 seconds)

        :param str backoff_type (optional):
            The type of backoff we want to do (e.g. full jitter). Currently the only supported value is 'full_jitter'
            (the convenience constant BACKOFF_FULL_JITTER in this class can also be used)
        """

        self.max_attempts_check = kwargs.get('max_attempts_check', False)
        self.service_error_check = kwargs.get('service_error_check', False)

        self.max_attempts = kwargs.get('max_attempts', None)
        self.service_error_retry_config = kwargs.get('service_error_retry_config', {})
        self.service_error_retry_on_any_5xx = kwargs.get('service_error_retry_on_any_5xx', True)

        self.retry_base_sleep_time_millis = kwargs.get('retry_base_sleep_time_millis', 1000)
        self.retry_exponential_growth_factor = kwargs.get('retry_exponential_growth_factor', 2)
        self.retry_max_wait_time_millis = kwargs.get('retry_max_wait_time_millis', 8000)

        if 'backoff_type' in kwargs and kwargs['backoff_type'] != self.BACKOFF_FULL_JITTER:
            raise ValueError('Currently full_jitter is the only supported backoff type')

    def add_max_attempts(self, max_attempts=None):
        self.max_attempts_check = True
        if max_attempts:
            self.max_attempts = max_attempts
        return self

    def no_max_attempts(self):
        self.max_attempts_check = False
        return self

    def add_service_error_check(self, **kwargs):
        self.service_error_check = True
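        # The retry configuration can be supplied either as a full mapping via the service_error_retry_config
        # kwarg, or as a single status/codes pair via the service_error_status and service_error_codes kwargs.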

        if 'service_error_retry_config' in kwargs:
            self.service_error_retry_config = kwargs['service_error_retry_config']
        elif 'service_error_status' in kwargs and 'service_error_codes' in kwargs:
            self.service_error_retry_config[kwargs['service_error_status']] = kwargs['service_error_codes']

        if 'service_error_retry_on_any_5xx' in kwargs:
            self.service_error_retry_on_any_5xx = kwargs['service_error_retry_on_any_5xx']

        return self

    def no_service_error_check(self):
        self.service_error_check = False
        return self

    def get_retry_strategy(self):
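        # Assemble the enabled checkers into a container and hand them to the exponential backoff with
        # full jitter strategy built below.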
        checkers = []

        if self.max_attempts_check:
            if self.max_attempts:
                checkers.append(retry_checkers.LimitBasedRetryChecker(max_attempts=self.max_attempts))
            else:
                checkers.append(retry_checkers.LimitBasedRetryChecker())

        if self.service_error_check:
            if self.service_error_retry_config:
                checkers.append(
                    retry_checkers.TimeoutConnectionAndServiceErrorRetryChecker(
                        service_error_retry_config=self.service_error_retry_config,
                        retry_any_5xx=self.service_error_retry_on_any_5xx
                    )
                )
            else:
                checkers.append(retry_checkers.TimeoutConnectionAndServiceErrorRetryChecker(retry_any_5xx=self.service_error_retry_on_any_5xx))

        checker_container = retry_checkers.RetryCheckerContainer(checkers=checkers)

        return ExponentialBackoffWithFullJitterRetryStrategy(
            base_sleep_time_millis=self.retry_base_sleep_time_millis,
            exponent_growth_factor=self.retry_exponential_growth_factor,
            max_wait_millis=self.retry_max_wait_time_millis,
            checker_container=checker_container
        )


class ExponentialBackoffWithFullJitterRetryStrategy(object):
    """
    A retry strategy which does exponential backoff and full jitter. Times used are in milliseconds and
    the strategy can be described as:

    .. code-block:: none

        random(0, min(base_sleep_time_millis * exponent_growth_factor ** (attempt), max_wait_millis))

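    As a worked example, with base_sleep_time_millis=1000, exponent_growth_factor=2 and max_wait_millis=8000,
    the delay after the third failed attempt is drawn from random(0, min(1000 * 2 ** 3, 8000)), i.e. anywhere
    between 0 and 8000 milliseconds.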
    """

    def __init__(self, base_sleep_time_millis, exponent_growth_factor, max_wait_millis, checker_container, **kwargs):
        """
        Creates a new instance of an exponential backoff with full jitter retry strategy.

        :param int base_sleep_time_millis:
            The base amount to sleep by, in milliseconds

        :param int exponent_growth_factor:
            The growth factor of our backoff. We take this value, raise it to the power of the number of
            attempts, and then multiply that by base_sleep_time_millis

        :param int max_wait_millis:
            The maximum time we will wait between calls

        :param retry_checkers.RetryCheckerContainer checker_container:
            The checks to run to determine whether a failed call should be retried
        """
        self.base_sleep_time_millis = base_sleep_time_millis
        self.exponent_growth_factor = exponent_growth_factor
        self.max_wait_millis = max_wait_millis
        self.checkers = checker_container

    def make_retrying_call(self, func_ref, *func_args, **func_kwargs):
        """
        Calls the function given by func_ref. Any positional (*func_args) and keyword (**func_kwargs)
        arguments are passed as-is to func_ref.

        :param function func_ref:
            The function that we should call with retries

        :return: the result of calling func_ref
        """
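        # Keep invoking the wrapped function until it either succeeds (its result is returned) or the
        # checkers decide a failure should not be retried (the original exception is re-raised).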
        should_retry = True
        attempt = 0
        while should_retry:
            try:
                return func_ref(*func_args, **func_kwargs)
            except Exception as e:
                attempt += 1
                if self.checkers.should_retry(exception=e, current_attempt=attempt):
                    self.do_sleep(attempt)
                else:
                    raise

    def do_sleep(self, attempt):
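        # Full jitter: pick a uniformly random delay between 0 and the exponentially growing (but capped)
        # backoff ceiling, then convert from milliseconds to the seconds that time.sleep expects.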
        sleep_time_millis = random.uniform(0, min(self.base_sleep_time_millis * (self.exponent_growth_factor ** attempt), self.max_wait_millis))
        time.sleep(sleep_time_millis / 1000.0)  # time.sleep needs seconds, but can take fractional seconds
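

# A minimal usage sketch (illustrative only, not part of the original module). It shows one way the builder
# above might be wired up: enable both checks, then wrap an arbitrary callable with the resulting strategy.
# The flaky_operation callable and its arguments are hypothetical placeholders.
#
#     strategy = RetryStrategyBuilder(max_attempts_check=True, service_error_check=True) \
#         .add_max_attempts(max_attempts=5) \
#         .add_service_error_check(service_error_retry_config={429: []}) \
#         .get_retry_strategy()
#
#     result = strategy.make_retrying_call(flaky_operation, 'some-arg', timeout=30)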