
Commit 2b5bbe3

Merge pull request #42 from oracle/release_2018-01-11

Add pagination module files to 1.3.12 release

2 parents 090bcd5 + e4a9659 commit 2b5bbe3

File tree

5 files changed (+555, -0 lines changed)

src/oci/pagination/__init__.py

Lines changed: 6 additions & 0 deletions

```python
# coding: utf-8
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.

from .pagination_utils import list_call_get_all_results, list_call_get_up_to_limit, list_call_get_all_results_generator, list_call_get_up_to_limit_generator

__all__ = ["list_call_get_all_results", "list_call_get_up_to_limit", "list_call_get_all_results_generator", "list_call_get_up_to_limit_generator"]
```
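For orientation, here is a minimal sketch of how the helpers exported above are typically invoked. The IdentityClient, the default config file, and list_users are illustrative assumptions rather than part of this commit (pagination_utils.py itself is not shown in this diff):

```python
# Sketch (assumed setup): aggregate every page of a list call.
import oci
from oci.pagination import list_call_get_all_results

config = oci.config.from_file()  # assumes a valid ~/.oci/config
identity = oci.identity.IdentityClient(config)

# Arguments after the function reference are forwarded to the list call itself.
response = list_call_get_all_results(identity.list_users, config["tenancy"])
print(len(response.data))  # all users across all pages
```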
src/oci/pagination/internal/__init__.py

Lines changed: 6 additions & 0 deletions

```python
# coding: utf-8
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.

from . import retry

__all__ = ["retry"]
```

src/oci/pagination/internal/retry.py

Lines changed: 199 additions & 0 deletions
```python
# coding: utf-8
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.

from . import retry_checkers

import random
import time


class RetryStrategyBuilder(object):
    """
    A class which can build a retry strategy based on provided criteria. Criteria can be provided at construction time or
    afterwards via the add_* (to add/enable criteria) and no_* (to disable/remove criteria) methods.

    When calculating the delay between retries, we use exponential backoff with full jitter as the default strategy
    vended by this builder.
    """
    BACKOFF_FULL_JITTER = 'full_jitter'

    def __init__(self, **kwargs):
        """
        Creates a new builder and initializes it based on any provided parameters.

        :param Boolean max_attempts_check (optional):
            Whether to enable a check that we don't exceed a certain number of attempts. If not provided,
            this defaults to False (i.e. this check will not be done)

        :param Boolean service_error_check (optional):
            Whether to enable a check that will retry on connection errors, timeouts and service errors
            which match given combinations of HTTP statuses and textual error codes. If not provided,
            this defaults to False (i.e. this check will not be done)

        :param int max_attempts (optional):
            If we are checking that we don't exceed a certain number of attempts, what that number of
            attempts should be. This only applies if we are performing a check on the maximum number of
            attempts and will be ignored otherwise. If we are performing a check on the maximum number of
            attempts and this value is not provided, we will default to a maximum of 5 attempts

        :param dict service_error_retry_config (optional):
            If we are checking on service errors, we can configure what HTTP statuses (e.g. 429) to retry on and, optionally,
            whether the textual code (e.g. TooManyRequests) matches a given value.

            This is a dictionary where the key is an integer representing the HTTP status, and the value is a list(str) where we
            will test if the textual code in the service error is a member of the list. If an empty list is provided, then only
            the numeric status is checked for retry purposes.

            If we are performing a check on service errors and this value is not provided, then by default we will retry on
            HTTP 429s (throttles) without any textual code check.

        :param Boolean service_error_retry_on_any_5xx (optional):
            If we are checking on service errors, whether to retry on any HTTP 5xx received from the service. If
            we are performing a check on service errors and this value is not provided, it defaults to True (retry on any 5xx)

        :param int retry_base_sleep_time_millis (optional):
            For exponential backoff with jitter, the base time to use in our retry calculation in milliseconds. If not
            provided, this value defaults to 1000ms (i.e. 1 second)

        :param int retry_exponential_growth_factor (optional):
            For exponential backoff with jitter, the base which we raise to the power of the number of attempts. If
            not provided, this value defaults to 2

        :param int retry_max_wait_time_millis (optional):
            For exponential backoff with jitter, the maximum amount of time to wait between retries. If not provided, this
            value defaults to 8000ms (i.e. 8 seconds)

        :param str backoff_type (optional):
            The type of backoff we want to do (e.g. full jitter). Currently the only supported value is 'full_jitter' (the convenience
            constant BACKOFF_FULL_JITTER in this class can also be used)
        """

        self.max_attempts_check = kwargs.get('max_attempts_check', False)
        self.service_error_check = kwargs.get('service_error_check', False)

        self.max_attempts = kwargs.get('max_attempts', None)
        self.service_error_retry_config = kwargs.get('service_error_retry_config', {})
        self.service_error_retry_on_any_5xx = kwargs.get('service_error_retry_on_any_5xx', True)

        self.retry_base_sleep_time_millis = kwargs.get('retry_base_sleep_time_millis', 1000)
        self.retry_exponential_growth_factor = kwargs.get('retry_exponential_growth_factor', 2)
        self.retry_max_wait_time_millis = kwargs.get('retry_max_wait_time_millis', 8000)

        if 'backoff_type' in kwargs and kwargs['backoff_type'] != self.BACKOFF_FULL_JITTER:
            raise ValueError('Currently full_jitter is the only supported backoff type')

    def add_max_attempts(self, max_attempts=None):
        self.max_attempts_check = True
        if max_attempts:
            self.max_attempts = max_attempts
        return self

    def no_max_attempts(self):
        self.max_attempts_check = False
        return self

    def add_service_error_check(self, **kwargs):
        self.service_error_check = True

        if 'service_error_retry_config' in kwargs:
            self.service_error_retry_config = kwargs['service_error_retry_config']
        elif 'service_error_status' in kwargs and 'service_error_codes' in kwargs:
            self.service_error_retry_config[kwargs['service_error_status']] = kwargs['service_error_codes']

        if 'service_error_retry_on_any_5xx' in kwargs:
            self.service_error_retry_on_any_5xx = kwargs['service_error_retry_on_any_5xx']

        return self

    def no_service_error_check(self):
        self.service_error_check = False
        return self

    def get_retry_strategy(self):
        checkers = []

        if self.max_attempts_check:
            if self.max_attempts:
                checkers.append(retry_checkers.LimitBasedRetryChecker(max_attempts=self.max_attempts))
            else:
                checkers.append(retry_checkers.LimitBasedRetryChecker())

        if self.service_error_check:
            if self.service_error_retry_config:
                checkers.append(
                    retry_checkers.TimeoutConnectionAndServiceErrorRetryChecker(
                        service_error_retry_config=self.service_error_retry_config,
                        retry_any_5xx=self.service_error_retry_on_any_5xx
                    )
                )
            else:
                checkers.append(retry_checkers.TimeoutConnectionAndServiceErrorRetryChecker(retry_any_5xx=self.service_error_retry_on_any_5xx))

        checker_container = retry_checkers.RetryCheckerContainer(checkers=checkers)

        return ExponentialBackoffWithFullJitterRetryStrategy(
            base_sleep_time_millis=self.retry_base_sleep_time_millis,
            exponent_growth_factor=self.retry_exponential_growth_factor,
            max_wait_millis=self.retry_max_wait_time_millis,
            checker_container=checker_container
        )
```
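A short usage sketch for the builder above; the status and code values are illustrative, and make_retrying_call belongs to the strategy class shown next:

```python
# Sketch: retry up to 4 attempts on throttles (429/TooManyRequests),
# timeouts, connection errors and, by default, any 5xx.
strategy = RetryStrategyBuilder() \
    .add_max_attempts(max_attempts=4) \
    .add_service_error_check(service_error_status=429,
                             service_error_codes=['TooManyRequests']) \
    .get_retry_strategy()

# Wrap any callable; positional/keyword args are forwarded as-is, e.g.:
# result = strategy.make_retrying_call(some_client.list_widgets, compartment_id)
```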
```python
class ExponentialBackoffWithFullJitterRetryStrategy(object):
    """
    A retry strategy which does exponential backoff and full jitter. Times used are in milliseconds and
    the strategy can be described as:

    .. code-block:: none

        random(0, min(base_sleep_time_millis * exponent_growth_factor ** attempt, max_wait_millis))
    """

    def __init__(self, base_sleep_time_millis, exponent_growth_factor, max_wait_millis, checker_container, **kwargs):
        """
        Creates a new instance of an exponential backoff with full jitter retry strategy.

        :param int base_sleep_time_millis:
            The base amount to sleep by, in milliseconds

        :param int exponent_growth_factor:
            The base of the exponential part of our backoff. We take this value, raise it to the power
            of the number of attempts, and then multiply the result by base_sleep_time_millis

        :param int max_wait_millis:
            The maximum time we will wait between calls

        :param retry_checkers.RetryCheckerContainer checker_container:
            The checks to run to determine whether a failed call should be retried
        """
        self.base_sleep_time_millis = base_sleep_time_millis
        self.exponent_growth_factor = exponent_growth_factor
        self.max_wait_millis = max_wait_millis
        self.checkers = checker_container

    def make_retrying_call(self, func_ref, *func_args, **func_kwargs):
        """
        Calls the function given by func_ref. Any positional (*func_args) and keyword (**func_kwargs)
        arguments are passed as-is to func_ref.

        :param function func_ref:
            The function that we should call with retries

        :return: the result of calling func_ref
        """
        should_retry = True
        attempt = 0
        while should_retry:
            try:
                return func_ref(*func_args, **func_kwargs)
            except Exception as e:
                attempt += 1
                if self.checkers.should_retry(exception=e, current_attempt=attempt):
                    self.do_sleep(attempt)
                else:
                    raise

    def do_sleep(self, attempt):
        sleep_time_millis = random.uniform(0, min(self.base_sleep_time_millis * (self.exponent_growth_factor ** attempt), self.max_wait_millis))
        time.sleep(sleep_time_millis / 1000.0)  # time.sleep needs seconds, but can take fractional seconds
```
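To make do_sleep concrete, here is the upper bound of each sleep window under the builder defaults (base 1000 ms, growth factor 2, cap 8000 ms); the actual delay is a uniform random draw from that window:

```python
import random

base_millis, growth_factor, cap_millis = 1000, 2, 8000  # builder defaults

for attempt in range(1, 6):
    bound = min(base_millis * growth_factor ** attempt, cap_millis)
    print(attempt, bound, random.uniform(0, bound))  # bound, plus one jittered draw
# bounds: attempt 1 -> 2000, 2 -> 4000, 3 -> 8000, then capped at 8000 thereafter
```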
src/oci/pagination/internal/retry_checkers.py

Lines changed: 150 additions & 0 deletions
```python
# coding: utf-8
# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
#
# Contains helper classes that can say whether a retry should occur based on various criteria, such as a maximum number of retries being
# hit or the exception received from a service call (or the response from the service call if no exception was raised).

from ...exceptions import ServiceError
from requests.exceptions import Timeout
from requests.exceptions import ConnectionError


class RetryCheckerContainer(object):
    """
    A container which holds at least one retry checker. This lets us chain together different retry checkers into an overall
    evaluation of whether we should retry a request.

    Checkers are evaluated in the order they appear in the provided list of checkers, and if one checker reports failure we
    consider this to be an overall failure and no more retries should happen.
    """

    def __init__(self, checkers, **kwargs):
        if not checkers:
            raise ValueError('At least one retry checker needs to be provided')
        self.checkers = checkers

    def add_checker(self, checker):
        self.checkers.append(checker)

    def should_retry(self, exception=None, response=None, **kwargs):
        """
        Determines if a retry should be performed based on either an exception or a response. We will
        retry if all the checkers held in this container indicate that they should retry; if any checker
        indicates that the call should not be retried then we will not retry.

        :param Exception exception:
            An exception received from the service

        :param Response response:
            The :class:`~oci.response.Response` received from a service call

        :return: True if we should retry, and False otherwise
        :rtype: Boolean
        """
        for c in self.checkers:
            if not c.should_retry(exception, response, **kwargs):
                return False

        return True


class BaseRetryChecker(object):
    """
    The base class from which all retry checkers should derive. This has no implementation but just defines the contract
    for a checker.
    """

    def __init__(self, **kwargs):
        pass

    def should_retry(self, exception=None, response=None, **kwargs):
        """
        Determines if a retry should be performed based on either an exception or a response.

        :param Exception exception:
            An exception received from the service

        :param Response response:
            The :class:`~oci.response.Response` received from a service call

        :return: True if we should retry, and False otherwise
        :rtype: Boolean
        """
        raise NotImplementedError('Subclasses should implement this')


class LimitBasedRetryChecker(BaseRetryChecker):
    """
    A retry checker which can retry as long as some threshold (# of attempts/tries) has not been breached.
    It is the responsibility of the caller to track how many attempts/tries it has done - objects of this
    class will not track this.

    If not specified, the default number of tries allowed is 5. Tries are also assumed to be one-based (i.e. the
    first attempt/try is 1, the second is 2, etc.)
    """

    def __init__(self, max_attempts=5, **kwargs):
        if max_attempts < 1:
            raise ValueError('The max number of attempts must be >= 1, with 1 indicating no retries')

        super(LimitBasedRetryChecker, self).__init__(**kwargs)
        self.max_attempts = max_attempts

    def should_retry(self, exception=None, response=None, **kwargs):
        return self.max_attempts > kwargs.get('current_attempt', 0)
```
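A small sketch of the one-based attempt semantics, and of how the container combines checkers (every checker must agree before a retry happens):

```python
limit = LimitBasedRetryChecker(max_attempts=3)
print(limit.should_retry(current_attempt=2))  # True: 3 > 2, one more try is allowed
print(limit.should_retry(current_attempt=3))  # False: the attempt budget is spent

container = RetryCheckerContainer(checkers=[limit])
print(container.should_retry(exception=None, current_attempt=1))  # True
```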
```python
class TimeoutConnectionAndServiceErrorRetryChecker(BaseRetryChecker):
    """
    A checker which will retry on certain exceptions. Retries are enabled for the following exception types:

    - Timeouts from the requests library (we will always retry on these)
    - ConnectionErrors from the requests library (we will always retry on these)
    - Service errors where the status is 500 or above (i.e. a server-side error)
    - Service errors where a status (e.g. 429) and, optionally, the code meet a given criteria

    The last item is configurable via a dictionary where the key is some numeric status representing an HTTP status and the value
    is a list of strings with each string representing a textual error code (such as those error codes documented at
    https://docs.us-phoenix-1.oraclecloud.com/Content/API/References/apierrors.htm). If an empty list is provided, then
    only the numeric status is checked for retry purposes. For a populated list, we are looking for where the numeric status matches
    and the code from the exception appears in the list. As an example:

    .. code-block:: python

        {
            400: ['QuotaExceeded'],
            500: []
        }

    If no configuration is provided, then the default for service errors is to retry on HTTP 429s and 5xxs without any code checks. If a
    specific 5xx code (e.g. 500, 502) is provided in the dictionary then it takes precedence over the option to retry on any 5xx. For example,
    it is possible to retry on only 502s (either by status, or by status and matching some code) by disabling the general "retry on any 5xx"
    configuration and placing an entry for 502 in the dictionary.
    """

    RETRYABLE_STATUSES_AND_CODES = {
        -1: [],
        429: []
    }

    def __init__(self, service_error_retry_config=RETRYABLE_STATUSES_AND_CODES, retry_any_5xx=True, **kwargs):
        super(TimeoutConnectionAndServiceErrorRetryChecker, self).__init__(**kwargs)
        self.retry_any_5xx = retry_any_5xx
        self.service_error_retry_config = service_error_retry_config

    def should_retry(self, exception=None, response=None, **kwargs):
        if isinstance(exception, Timeout):
            return True
        elif isinstance(exception, ConnectionError):
            return True
        elif isinstance(exception, ServiceError):
            if exception.status in self.service_error_retry_config:
                codes = self.service_error_retry_config[exception.status]
                if not codes:
                    return True
                else:
                    return exception.code in codes
            elif self.retry_any_5xx and exception.status >= 500:
                return True

        return False
```
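Finally, a sketch of the status/code configuration in action. The ServiceError construction below assumes a (status, code, headers, message) argument order for oci.exceptions.ServiceError; treat it as illustrative:

```python
from oci.exceptions import ServiceError  # assumed constructor: (status, code, headers, message)

checker = TimeoutConnectionAndServiceErrorRetryChecker(
    service_error_retry_config={429: ['TooManyRequests']},
    retry_any_5xx=True
)

throttle = ServiceError(429, 'TooManyRequests', {}, 'Slow down')
print(checker.should_retry(exception=throttle))      # True: status and code both match

bad_request = ServiceError(400, 'InvalidParameter', {}, 'Bad request')
print(checker.should_retry(exception=bad_request))   # False: 400 is not configured

bad_gateway = ServiceError(502, 'InternalError', {}, 'Bad gateway')
print(checker.should_retry(exception=bad_gateway))   # True: retry_any_5xx covers it
```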
